diff --git a/.cursor/rules/keep-ui-react-typescript.mdc b/.cursor/rules/keep-ui-react-typescript.mdc new file mode 100644 index 0000000000..c85f89f07b --- /dev/null +++ b/.cursor/rules/keep-ui-react-typescript.mdc @@ -0,0 +1,88 @@ +--- +description: +globs: +alwaysApply: true +--- +--- +description: Rules for writing frontend code at Keep (React + Typescript) +globs: keep-ui/**/*.tsx, keep-ui/**/*.ts +--- + +You are an expert in TypeScript, React, Next.js, SWR, Tailwind, and UX design. + +# Architecture +Use Feature-Sliced Design convention with modification: instead of `pages` and `app` we use default Next.js route-based folder structure. + +Example: +- entities/ + - incidents/ + - api/ + - lib/ + - model/ + - ui/ + +Top-level folders, called Layers: +- widgets +- features +- entities +- shared + +Each layer has slices, e.g. "entities/users". + +Each slice has segments +- ui — everything related to UI display: UI components, date formatters, styles, etc. +- api — backend interactions: request functions, data types, mappers, etc. +- model — the data model: schemas, interfaces, stores, and business logic. +- lib — library code that other modules on this slice need. +- config — configuration files and feature flags. + +# Code Style and Structure +- Write TypeScript with proper typing for all new code +- Use functional programming patterns; avoid classes +- Prefer iteration and modularization over code duplication. +- Use descriptive variable names with auxiliary verbs (e.g., isLoading, hasError). +- Don't use `useEffect` where you can use ref function for dom-dependent things (e.g. ref={el => ...}) +- Don't use `useState` where you can infer from props +- Use named exports; avoid default exports +- If you need to create new base component, first look at existing ones in `@/shared/ui` + +# Naming Conventions +- Always look around the codebase for naming conventions, and follow the best practices of the environment (e.g. use `camelCase` variables in JS). 
+- Use clear, yet functional names (`searchResults` vs `data`). +- React components are PascalCase (`IncidentList`). +- Props for components and hooks are PascalCase and end with `Props`, e.g. `WorkflowBuilderWidgetProps`, return value for hooks is PascalCase and end with `Value`, e.g. `UseIncidentActionsValue` +- Name the `.ts` file according to its main export: `IncidentList.ts` or `IncidentList.tsx` or `useIncidents.ts`. Pay attention to the case. +- Avoid `index.ts`, `styles.css`, and other generic names, even if this is the only file in a directory. + +# Data Fetching +- Use useSWR for fetching data, create or extend hooks in @/entities//model/use.ts which encapsulates fetching logic +- Create a dedicated keys file @/entities//lib/Keys.ts to manage SWR cache keys. Structure it as an object with methods for different operations: +```export const entityKeys = { + all: "entityName", + list: (query: QueryParams) => [...], + detail: (id: string) => [...], + getListMatcher: () => (key: any) => boolean +}``` +- For query-based endpoints, construct cache keys by joining parameters with "::", filtering out falsy values: +```list: (query: QueryParams) => [ + entityKeys.all, + "list", + query.param1, + query.param2 +].filter(Boolean).join("::")``` +- For create, update, delete actions: + - Create or extend hook in @/entities//model/useActions.ts + - Create a dedicated revalidation hook (e.g., useRevalidation.ts) to handle cache invalidation + - Revalidate both specific items and list queries after mutations + - Include success/error toast notifications for user feedback + - Handle file uploads and other complex operations within the actions hook + +# UI and Styling +- Use Tailwind CSS as primary styling solution +- For non-Tailwind cases: + - Use CSS with component-specific files + - Namespace under component class (.DropdownMenu) + - Follow BEM for modals (.DropdownMenu__modal) + - Import styles directly (import './DropdownMenu.css') +- Replace custom CSS with Tailwind 
when possible + diff --git a/.cursor/rules/keep-ui-tests.mdc b/.cursor/rules/keep-ui-tests.mdc new file mode 100644 index 0000000000..4042d17cc5 --- /dev/null +++ b/.cursor/rules/keep-ui-tests.mdc @@ -0,0 +1,18 @@ +--- +description: +globs: +alwaysApply: true +--- +--- +description: Rules and guidelines for writing and running React tests +globs: *.spec.tsx, *.test.tsx, *.test.ts, *.spec.ts +--- + +# Writing frontend tests + +Place tests in __tests__ folder in the module, e.g. tests for file `/features/workflows/model/useWorkflows.tsx` should be `/features/workflows/models/__tests__/useWorkflows.test.tsx` + +# Running frontend tests + +Please run tests with command: npm run test in keep-ui folder +For example: cd keep-ui && npm run test \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index dd84ea7824..384985b3f7 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,10 +1,9 @@ --- name: Bug report about: Create a report to help us improve -title: '' -labels: '' -assignees: '' - +title: "[🐛 Bug]: " +labels: "" +assignees: "" --- **Describe the bug** @@ -12,6 +11,7 @@ A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: + 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' @@ -23,16 +23,5 @@ A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. -**Desktop (please complete the following information):** - - OS: [e.g. iOS] - - Browser [e.g. chrome, safari] - - Version [e.g. 22] - -**Smartphone (please complete the following information):** - - Device: [e.g. iPhone6] - - OS: [e.g. iOS8.1] - - Browser [e.g. stock browser, safari] - - Version [e.g. 22] - **Additional context** Add any other context about the problem here. 
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000..75d8734818 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,14 @@ +blank_issues_enabled: true + +contact_links: + - name: Support + url: https://github.com/keephq/keep/discussions + about: Get help! Ask questions, get support, and share ideas. + + - name: Chat + url: https://slack.keephq.dev + about: Engage with the Keep team and other community members over Slack. + + - name: Twitter + url: https://twitter.com/keepalerting + about: Follow us and stay up to date with Keep. diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md new file mode 100644 index 0000000000..cc2c7fd0e5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/documentation.md @@ -0,0 +1,10 @@ +--- +name: Documentation issue +about: Any issue related with Keep's documentation +title: "[📃 Docs]: " +labels: "Documentation" +assignees: "" +--- + +**Describe the documentation change** +Add any context about the documentation change you aim to do. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index bbcbbe7d61..a9a650ab93 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,10 +1,9 @@ --- name: Feature request about: Suggest an idea for this project -title: '' -labels: '' -assignees: '' - +title: "[➕ Feature]: " +labels: "" +assignees: "" --- **Is your feature request related to a problem? 
Please describe.** diff --git a/.github/ISSUE_TEMPLATE/new_provider_request.md b/.github/ISSUE_TEMPLATE/new_provider_request.md index 8f699c9e20..4c9b6c99d0 100644 --- a/.github/ISSUE_TEMPLATE/new_provider_request.md +++ b/.github/ISSUE_TEMPLATE/new_provider_request.md @@ -1,10 +1,9 @@ --- name: New provider request about: Suggest a new provider for keep -title: '' -labels: 'provider' -assignees: '' - +title: "[🔌 Provider]: " +labels: "Provider" +assignees: "" --- **Describe the provider you want to add** diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml new file mode 100644 index 0000000000..a5892cc36e --- /dev/null +++ b/.github/workflows/auto-release.yml @@ -0,0 +1,53 @@ +name: Auto Release on Version Change + +on: + push: + branches: + - main + paths: + - "pyproject.toml" + +jobs: + check-and-release: + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.11" + + - name: Extract version from pyproject.toml + id: get_version + run: | + VERSION=$(grep '^version = ' pyproject.toml | sed 's/version = "\(.*\)"/\1/') + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Check if release exists + id: check_release + run: | + TAG_EXISTS=$(git tag -l "v${{ steps.get_version.outputs.version }}") + if [ -z "$TAG_EXISTS" ]; then + echo "exists=false" >> $GITHUB_OUTPUT + else + echo "exists=true" >> $GITHUB_OUTPUT + fi + + - name: Create Release + if: steps.check_release.outputs.exists == 'false' + uses: softprops/action-gh-release@v1 + with: + tag_name: v${{ steps.get_version.outputs.version }} + name: Release v${{ steps.get_version.outputs.version }} + generate_release_notes: true + draft: false + prerelease: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/auto-resolve-keep.yml b/.github/workflows/auto-resolve-keep.yml 
new file mode 100644 index 0000000000..63442ad95b --- /dev/null +++ b/.github/workflows/auto-resolve-keep.yml @@ -0,0 +1,77 @@ +name: Auto resolve Keep incident/alert + +on: + workflow_dispatch: + inputs: + incident_id: + description: "Keep incident ID to resolve" + required: false + type: string + alert_fingerprint: + description: "Keep alert fingerprint to resolve" + required: false + type: string + status: + description: "Status to set" + required: false + type: string + default: "resolved" + pull_request: + types: [closed] + branches: + - main + +jobs: + auto-resolve-keep: + runs-on: ubuntu-latest + steps: + - name: Extract Keep ID from PR description + if: github.event_name == 'pull_request' + id: extract_id + run: | + PR_DESC="${{ github.event.pull_request.body }}" + INCIDENT_ID=$(echo "$PR_DESC" | grep -ioP 'close keep incident:\s*\K[a-f0-9-]+' || true) + ALERT_FINGERPRINT=$(echo "$PR_DESC" | grep -ioP 'close keep alert:\s*\K[a-f0-9-]+' || true) + echo "incident_id=$INCIDENT_ID" >> $GITHUB_OUTPUT + echo "alert_fingerprint=$ALERT_FINGERPRINT" >> $GITHUB_OUTPUT + + - name: Set final IDs + id: set_ids + run: | + FINAL_INCIDENT_ID="${{ inputs.incident_id || steps.extract_id.outputs.incident_id }}" + FINAL_ALERT_FINGERPRINT="${{ inputs.alert_fingerprint || steps.extract_id.outputs.alert_fingerprint }}" + echo "final_incident_id=$FINAL_INCIDENT_ID" >> $GITHUB_OUTPUT + echo "final_alert_fingerprint=$FINAL_ALERT_FINGERPRINT" >> $GITHUB_OUTPUT + + - name: Auto resolve Keep incident + if: | + (github.event_name == 'pull_request' && github.event.pull_request.merged == true && steps.set_ids.outputs.final_incident_id != '') || + (github.event_name == 'workflow_dispatch' && inputs.incident_id != '') + uses: fjogeleit/http-request-action@v1 + with: + url: "https://api.keephq.dev/incidents/${{ steps.set_ids.outputs.final_incident_id }}/status" + method: "POST" + customHeaders: '{"X-API-KEY": "${{ secrets.KEEP_API_KEY }}", "Content-Type": "application/json"}' + data: 
'{"status": "${{ inputs.status || ''resolved'' }}"}' + + - name: Auto enrich Keep incident + if: | + (github.event_name == 'pull_request' && github.event.pull_request.merged == true && steps.set_ids.outputs.final_incident_id != '') || + (github.event_name == 'workflow_dispatch' && inputs.incident_id != '') + uses: fjogeleit/http-request-action@v1 + with: + url: "https://api.keephq.dev/incidents/${{ steps.set_ids.outputs.final_incident_id }}/enrich" + method: "POST" + customHeaders: '{"X-API-KEY": "${{ secrets.KEEP_API_KEY }}", "Content-Type": "application/json"}' + data: '{"enrichments":{"incident_title":"${{ github.event.pull_request.title || ''Manual resolution'' }}","incident_url":"${{ github.event.pull_request.html_url || github.server_url }}//${{ github.repository }}/actions/runs/${{ github.run_id }}", "incident_id": "${{ github.run_id }}", "incident_provider": "github"}}' + + - name: Auto resolve Keep alert + if: | + (github.event_name == 'pull_request' && github.event.pull_request.merged == true && steps.set_ids.outputs.final_alert_fingerprint != '') || + (github.event_name == 'workflow_dispatch' && inputs.alert_fingerprint != '') + uses: fjogeleit/http-request-action@v1 + with: + url: "https://api.keephq.dev/alerts/enrich?dispose_on_new_alert=true" + method: "POST" + customHeaders: '{"Content-Type": "application/json", "X-API-KEY": "${{ secrets.KEEP_API_KEY }}"}' + data: '{"enrichments":{"status":"${{ inputs.status || ''resolved'' }}","dismissed":false,"dismissUntil":"","note":"${{ github.event.pull_request.title || ''Manual resolution'' }}","ticket_url":"${{ github.event.pull_request.html_url || github.server_url }}//${{ github.repository }}/actions/runs/${{ github.run_id }}"},"fingerprint":"${{ steps.set_ids.outputs.final_alert_fingerprint }}"}' diff --git a/.github/workflows/but-to-project.yml b/.github/workflows/but-to-project.yml new file mode 100644 index 0000000000..3b397573e0 --- /dev/null +++ b/.github/workflows/but-to-project.yml @@ -0,0 +1,17 @@ 
+name: Add bugs to project board + +on: + issues: + types: + - labeled + +jobs: + add-to-project: + name: Add bug to project board + runs-on: ubuntu-latest + if: github.event.label.name == 'Bug' + steps: + - uses: actions/add-to-project@v0.5.0 + with: + project-url: https://github.com/orgs/keephq/projects/11 + github-token: ${{ secrets.ADD_TO_PROJECT_PAT }} diff --git a/.github/workflows/developer-onboarding-notification.yml b/.github/workflows/developer-onboarding-notification.yml new file mode 100644 index 0000000000..8a35fb308d --- /dev/null +++ b/.github/workflows/developer-onboarding-notification.yml @@ -0,0 +1,107 @@ +name: Celebrating Contributions + +on: + pull_request_target: + types: [closed] + +permissions: + pull-requests: write + +jobs: + comment_on_merged_pull_request: + if: github.event.pull_request.merged == true + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Set Environment Variables + env: + AUTHOR: ${{ github.event.pull_request.user.login }} + REPO: ${{ github.event.repository.name }} + OWNER: ${{ github.event.repository.owner.login }} + run: | + echo "AUTHOR=${AUTHOR}" >> $GITHUB_ENV + echo "REPO=${REPO}" >> $GITHUB_ENV + echo "OWNER=${OWNER}" >> $GITHUB_ENV + + - name: Count Merged Pull Requests + id: count_merged_pull_requests + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + try { + const author = process.env.AUTHOR; + const repo = process.env.REPO; + const owner = process.env.OWNER; + const { data } = await github.rest.search.issuesAndPullRequests({ + q: `repo:${owner}/${repo} type:pr state:closed author:${author}` + }); + const prCount = data.items.filter(pr => pr.pull_request.merged_at).length; + core.exportVariable('PR_COUNT', prCount); + } catch (error) { + core.setFailed(`Error counting merged pull requests: ${error.message}`); + } + + - name: Comment on the Merged Pull Request + uses: actions/github-script@v6 + with: + 
github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + try { + const prCount = parseInt(process.env.PR_COUNT); + const author = process.env.AUTHOR; + const prNumber = context.payload.pull_request.number; + const repo = process.env.REPO; + + function getRandomEmoji() { + const emojis = ['🎉', '🚀', '💪', '🌟', '🏆', '🎊', '🔥', '👏', '🌈', '🚂']; + return emojis[Math.floor(Math.random() * emojis.length)]; + } + + function getMessage(count) { + const emoji = getRandomEmoji(); + switch(count) { + case 1: + return `${emoji} **Fantastic work @${author}!** Your very first PR to ${repo} has been merged! 🎉🥳\n\n` + + `You've just taken your first step into open-source, and we couldn't be happier to have you onboard. 🙌\n` + + `If you're feeling adventurous, why not dive into another issue and keep contributing? The community would love to see more from you! 🚀\n\n` + + `For any support, feel free to reach out on the community: https://slack.keephq.dev. Happy coding! 👩‍💻👨‍💻`; + case 2: + return `${emoji} **Well done @${author}!** Two PRs merged already! 🎉🥳\n\n` + + `With your second PR, you're on a roll, and your contributions are already making a difference. 🌟\n` + + `Looking forward to seeing even more contributions from you. See you in Slack https://slack.keephq.dev 🚀`; + case 3: + return `${emoji} **You're on fire, @${author}!** Three PRs merged and counting! 🔥🎉\n\n` + + `Your consistent contributions are truly impressive. You're becoming a valued member of our community! 💖\n` + + `Have you considered taking on some more challenging issues? We'd love to see what you can do! 💪\n\n` + + `Remember, the team is always here to support you. Keep blazing that trail! 🚀`; + case 5: + return `${emoji} **High five, @${author}!** You've hit the incredible milestone of 5 merged PRs! 🖐️✨\n\n` + + `Your dedication to ${repo} is outstanding. You're not just contributing code; you're shaping the future of this project! 🌠\n` + + `We'd love to hear your thoughts on the project. 
Any ideas for new features or improvements? 🤔\n\n` + + `The whole team applaud your efforts. You're a superstar! 🌟`; + case 10: + return `${emoji} **Double digits, @${author}!** 10 merged PRs is a massive achievement! 🏆🎊\n\n` + + `Your impact on ${repo} is undeniable. You've become a pillar of our community! 🏛️\n` + + `We'd be thrilled to have you take on a mentorship role for newer contributors. Interested? 🧑‍🏫\n\n` + + `Everyone here are in awe of your contributions. You're an open source hero! 🦸‍♀️🦸‍♂️`; + default: + return ""; + } + } + + const message = getMessage(prCount); + + if (message) { + await github.rest.issues.createComment({ + owner: process.env.OWNER, + repo: process.env.REPO, + issue_number: prNumber, + body: message + }); + } + } catch (error) { + core.setFailed(`Error creating comment: ${error.message}`); + } diff --git a/.github/workflows/lint-pr.yml b/.github/workflows/lint-pr.yml index 2d7d611d13..9d8e7d052d 100644 --- a/.github/workflows/lint-pr.yml +++ b/.github/workflows/lint-pr.yml @@ -6,6 +6,10 @@ on: - opened - edited - synchronize + - reopened + +permissions: + pull-requests: write # Add explicit permissions for PR comments jobs: main: @@ -39,3 +43,17 @@ jobs: with: header: pr-title-lint-error delete: true + links: + runs-on: ubuntu-latest + name: Validate PR to Issue link + permissions: + issues: read + pull-requests: write + steps: + - uses: nearform-actions/github-action-check-linked-issues@v1 + id: check-linked-issues + with: + exclude-branches: "release/**, dependabot/**" + # OPTIONAL: Use the output from the `check-linked-issues` step + - name: Get the output + run: echo "How many linked issues? 
${{ steps.check-linked-issues.outputs.linked_issues_count }}" diff --git a/.github/workflows/release-workflow-schema.yml b/.github/workflows/release-workflow-schema.yml new file mode 100644 index 0000000000..6aeeeb7c4a --- /dev/null +++ b/.github/workflows/release-workflow-schema.yml @@ -0,0 +1,174 @@ +name: Release JSON Schema + +on: + push: + branches: + - main + paths: + - ".github/workflows/release-workflow-schema.yml" + - "pyproject.toml" + - "keep/providers/**" + - "keep-ui/entities/workflows/model/yaml.schema.ts" + pull_request: + paths: + - ".github/workflows/release-workflow-schema.yml" + - "pyproject.toml" + - "keep/providers/**" + - "keep-ui/entities/workflows/model/yaml.schema.ts" + workflow_dispatch: + +env: + PYTHON_VERSION: 3.11 + STORAGE_MANAGER_DIRECTORY: /tmp/storage-manager + SCHEMA_REPO_NAME: keephq/keep-workflow-schema +jobs: + generate-schema: + runs-on: ubuntu-latest + permissions: + contents: read + + outputs: + version: ${{ steps.get_version.outputs.version }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Extract version from pyproject.toml + id: get_version + run: | + VERSION=$(grep '^version = ' pyproject.toml | sed 's/version = "\(.*\)"/\1/') + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Set up Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v4 + with: + python-version: "3.11" + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Cache dependencies + id: cache-deps + uses: actions/cache@v4.2.0 + with: + path: .venv + key: pydeps-${{ hashFiles('**/poetry.lock') }} + + - name: Install dependencies using poetry + run: poetry install --no-interaction --no-root --with dev + + - name: Save providers list + run: | + PYTHONPATH="${{ github.workspace }}" poetry run python ./scripts/save_providers_list.py + + - name: Set up Node.js 20 + uses: actions/setup-node@v3 + with: + node-version: 20 
+ cache: "npm" + cache-dependency-path: keep-ui/package-lock.json + + - name: Install Node dependencies + working-directory: keep-ui + run: npm ci + + - name: Generate JSON Schema + working-directory: keep-ui + run: npm run build:workflow-yaml-json-schema + + - name: Upload schema artifact + uses: actions/upload-artifact@v4 + with: + name: workflow-schema + path: workflow-yaml-json-schema.json + + release-schema: + runs-on: ubuntu-latest + needs: generate-schema + if: ${{ github.event_name != 'pull_request' || !github.event.pull_request.head.repo.fork }} + + steps: + - name: Download schema artifact + uses: actions/download-artifact@v4 + with: + name: workflow-schema + path: . + - name: Checkout schema repository + uses: actions/checkout@v4 + with: + repository: ${{ env.SCHEMA_REPO_NAME }} + token: ${{ secrets.SCHEMA_REPO_PAT }} + path: schema-repo + + - name: Set target branch variable + id: set_branch + run: | + if [ "${{ github.event_name }}" = "pull_request" ]; then + echo "branch=${{ github.head_ref }}" >> $GITHUB_OUTPUT + else + echo "branch=${{ github.ref_name }}" >> $GITHUB_OUTPUT + fi + + - name: Create or switch to target branch in schema repo + working-directory: schema-repo + run: | + git fetch origin + if git show-ref --verify --quiet refs/heads/${{ steps.set_branch.outputs.branch }}; then + git checkout ${{ steps.set_branch.outputs.branch }} + else + git checkout -b ${{ steps.set_branch.outputs.branch }} + fi + + - name: Copy schema to target repository + run: | + cp workflow-yaml-json-schema.json schema-repo/schema.json + + # Update schema with version info + jq --arg version "${{ needs.generate-schema.outputs.version }}" \ + --arg id "https://raw.githubusercontent.com/${{ env.SCHEMA_REPO_NAME }}/v${{ needs.generate-schema.outputs.version }}/schema.json" \ + '. 
+ {version: $version, "$id": $id}' \ + schema-repo/schema.json > schema-repo/schema.tmp.json + + mv schema-repo/schema.tmp.json schema-repo/schema.json + + - name: Check if schema changed + id: check_changes + working-directory: schema-repo + run: | + git add schema.json + if git diff --cached --quiet schema.json; then + echo "changed=false" >> $GITHUB_OUTPUT + else + echo "changed=true" >> $GITHUB_OUTPUT + fi + + - name: Commit and push schema + if: steps.check_changes.outputs.changed == 'true' + working-directory: schema-repo + run: | + git config user.name "Keep Schema Bot" + git config user.email "no-reply@keephq.dev" + git commit -m "Release schema v${{ needs.generate-schema.outputs.version }}" + git push origin ${{ steps.set_branch.outputs.branch }} + if [ "${{ steps.set_branch.outputs.branch }}" = "main" ]; then + git tag "v${{ needs.generate-schema.outputs.version }}" + git push origin "v${{ needs.generate-schema.outputs.version }}" + fi + + - name: Create GitHub Release + if: steps.check_changes.outputs.changed == 'true' && steps.set_branch.outputs.branch == 'main' + uses: softprops/action-gh-release@v1 + with: + repository: ${{ env.SCHEMA_REPO_NAME }} + tag_name: v${{ needs.generate-schema.outputs.version }} + name: Release v${{ needs.generate-schema.outputs.version }} + body: | + Automated release of schema version v${{ needs.generate-schema.outputs.version }}. 
+ env: + GITHUB_TOKEN: ${{ secrets.SCHEMA_REPO_PAT }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 64a0b202b1..d4020f083c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,18 +10,49 @@ jobs: permissions: id-token: write contents: write + pull-requests: write steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - persist-credentials: false + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + persist-credentials: false + ref: main - - name: Release Keep - uses: python-semantic-release/python-semantic-release@v9.4.0 - env: - GH_TOKEN: ${{ secrets.SHAHAR_PAT }} - with: - git_committer_name: Keep Release Bot - git_committer_email: no-reply@keephq.dev - github_token: ${{ secrets.SHAHAR_PAT }} + - name: Release Keep + id: release-step + uses: python-semantic-release/python-semantic-release@v9.8.7 + with: + git_committer_name: Keep Release Bot + git_committer_email: no-reply@keephq.dev + github_token: ${{ secrets.GITHUB_TOKEN }} + push: false + tag: true + commit: true + + - name: Open PR for release branch + id: pr-step + uses: peter-evans/create-pull-request@v6.1.0 + with: + committer: Keep Release Bot + title: "Release - ${{ steps.release-step.outputs.version }}" + branch: release/${{ steps.release-step.outputs.version }} + body: "This PR contains the latest release changes." 
+ draft: false + base: main + + - uses: peter-evans/enable-pull-request-automerge@v3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + pull-request-number: ${{ steps.pr-step.outputs.pull-request-number }} + + - name: Create release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + tag: "v${{ steps.release-step.outputs.version }}" + run: | + gh release create "$tag" \ + --repo="$GITHUB_REPOSITORY" \ + --title="v${{ steps.release-step.outputs.version }}" \ + --target="release/${{ steps.release-step.outputs.version }}" \ + --generate-notes diff --git a/.github/workflows/run-e2e-tests.yml b/.github/workflows/run-e2e-tests.yml new file mode 100644 index 0000000000..15ee46cb57 --- /dev/null +++ b/.github/workflows/run-e2e-tests.yml @@ -0,0 +1,333 @@ +on: + workflow_call: + inputs: + db-type: + required: true + type: string + redis_enabled: + required: true + type: boolean + python-version: + required: true + type: string + is-fork: + required: true + type: boolean + backend-image-name: + required: true + type: string + frontend-image-name: + required: true + type: string + +jobs: + # Run tests with all services in one job + run-tests: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + env: + REDIS: ${{ inputs.redis_enabled }} + REDIS_HOST: keep-redis + REDIS_PORT: 6379 + BACKEND_IMAGE: ${{ inputs.backend-image-name }} + FRONTEND_IMAGE: ${{ inputs.frontend-image-name }} + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Login to GitHub Container Registry + if: ${{ inputs.is-fork != true }} + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Python ${{ inputs.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ inputs.python-version }} + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Restore dependencies cache + id: 
cache-deps + uses: actions/cache@v4.2.0 + with: + path: .venv + key: pydeps-${{ hashFiles('**/poetry.lock') }} + + # Always install dependencies to ensure venv is valid + # When cached, this completes quickly; when broken, this fixes it + - name: Install dependencies using poetry + run: poetry install --no-interaction --no-root --with dev + + - name: Get Playwright version from poetry.lock + id: playwright-version + run: | + PLAYWRIGHT_VERSION=$(grep "playwright" poetry.lock -A 5 | grep "version" | head -n 1 | cut -d'"' -f2) + echo "version=$PLAYWRIGHT_VERSION" >> $GITHUB_OUTPUT + + - name: Cache Playwright browsers + id: playwright-cache + uses: actions/cache@v4.2.0 + with: + path: ~/.cache/ms-playwright + key: playwright-${{ steps.playwright-version.outputs.version }} + + - name: Install Playwright and dependencies + if: steps.playwright-cache.outputs.cache-hit != 'true' + run: | + poetry run playwright install --with-deps + + # For forks: Build images locally again since they don't persist between jobs + - name: Set up Docker Buildx + if: ${{ inputs.is-fork == true }} + id: buildx + uses: docker/setup-buildx-action@v2 + + - name: Rebuild frontend image locally for fork PRs + if: ${{ inputs.is-fork == true }} + uses: docker/build-push-action@v4 + with: + context: keep-ui + file: ./docker/Dockerfile.ui + push: false + load: true + tags: | + keep-frontend:local + cache-from: type=gha + cache-to: type=gha,mode=max + build-args: | + BUILDKIT_INLINE_CACHE=1 + + - name: Rebuild backend image locally for fork PRs + if: ${{ inputs.is-fork == true }} + uses: docker/build-push-action@v4 + with: + context: . 
+ file: ./docker/Dockerfile.api + push: false + load: true + tags: | + keep-backend:local + cache-from: type=gha + cache-to: type=gha,mode=max + build-args: | + BUILDKIT_INLINE_CACHE=1 + + # Create a modified compose file with our built images + - name: Create modified docker-compose file with built images + run: | + cp tests/e2e_tests/docker-compose-e2e-${{ inputs.db-type }}.yml tests/e2e_tests/docker-compose-modified.yml + + # Replace image placeholders with actual image references + sed -i "s|%KEEPFRONTEND_IMAGE%|${{ env.FRONTEND_IMAGE }}|g" tests/e2e_tests/docker-compose-modified.yml + sed -i "s|%KEEPBACKEND_IMAGE%|${{ env.BACKEND_IMAGE }}|g" tests/e2e_tests/docker-compose-modified.yml + + # cat the modified file for debugging + cat tests/e2e_tests/docker-compose-modified.yml + + # Start ALL services in one go + - name: Start ALL services + run: | + echo "Starting ALL services for ${{ inputs.db-type }}..." + + # Pull the required images first (only needed for non-fork builds) + if [[ "${{ inputs.is-fork }}" != "true" ]]; then + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml pull + fi + + # Start all services together + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml up -d + + # Show running containers + docker ps + + # Show the images sha of the running containers + docker images + + # Wait for all services to be ready + - name: Wait for services to be ready + run: | + # Function for exponential backoff + function wait_for_service() { + local service_name=$1 + local check_command=$2 + local max_attempts=$3 + local compose_service=$4 # Docker Compose service name + local attempt=0 + local wait_time=1 + + echo "Waiting for $service_name to be ready..." + until eval "$check_command"; do + if [ "$attempt" -ge "$max_attempts" ]; then + echo "Max attempts reached, exiting..." + # Show final logs before exiting + if [ ! 
-z "$compose_service" ]; then + echo "===== FINAL LOGS FOR ON ERROR EXIT $compose_service =====" + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml logs $compose_service + echo "==========================================" + fi + exit 1 + fi + + echo "Waiting for $service_name... (Attempt: $((attempt+1)), waiting ${wait_time}s)" + + # Print logs using docker compose + if [ ! -z "$compose_service" ]; then + echo "===== RECENT LOGS FOR $compose_service =====" + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml logs $compose_service --tail 100 + echo "==========================================" + fi + + attempt=$((attempt+1)) + sleep $wait_time + # Exponential backoff with max of 8 seconds + wait_time=$((wait_time * 2 > 8 ? 8 : wait_time * 2)) + done + echo "$service_name is ready!" + + # last time, print logs using docker compose + if [ ! -z "$compose_service" ]; then + echo "===== FINAL LOGS FOR $compose_service =====" + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml logs $compose_service --tail 100 + echo "==========================================" + fi + } + + # Database checks + if [ "${{ inputs.db-type }}" == "mysql" ]; then + wait_for_service "MySQL Database" "docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml exec -T keep-database mysqladmin ping -h \"localhost\" --silent" 10 "keep-database" + wait_for_service "MySQL Database (DB AUTH)" "docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml exec -T keep-database-db-auth mysqladmin ping -h \"localhost\" --silent" 10 "keep-database-db-auth" + elif [ "${{ inputs.db-type }}" == "postgres" ]; then + wait_for_service "Postgres Database" "docker compose -p keep --project-directory . 
-f tests/e2e_tests/docker-compose-modified.yml exec -T keep-database pg_isready -h localhost -U keepuser" 10 "keep-database" + wait_for_service "Postgres Database (DB AUTH)" "docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml exec -T keep-database-db-auth pg_isready -h localhost -U keepuser" 10 "keep-database-db-auth" + fi + + # Wait for services with health checks + wait_for_service "Keep backend" "curl --output /dev/null --silent --fail http://localhost:8080/healthcheck" 15 "keep-backend" + wait_for_service "Keep backend (DB AUTH)" "curl --output /dev/null --silent --fail http://localhost:8081/healthcheck" 15 "keep-backend-db-auth" + wait_for_service "Keep frontend" "curl --output /dev/null --silent --fail http://localhost:3000/" 15 "keep-frontend" + wait_for_service "Keep frontend (DB AUTH)" "curl --output /dev/null --silent --fail http://localhost:3001/" 15 "keep-frontend-db-auth" + + # Give Prometheus and Grafana extra time to initialize + # (using direct curl commands instead of container exec) + echo "Waiting for Prometheus to be ready..." + MAX_ATTEMPTS=15 + for i in $(seq 1 $MAX_ATTEMPTS); do + if curl --output /dev/null --silent --fail http://localhost:9090/-/healthy; then + echo "Prometheus is ready!" + break + elif [ $i -eq $MAX_ATTEMPTS ]; then + echo "Prometheus did not become ready in time, but continuing..." + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml logs prometheus-server-for-test-target --tail 50 + else + echo "Waiting for Prometheus... Attempt $i/$MAX_ATTEMPTS" + sleep 5 + fi + done + + echo "Waiting for Grafana to be ready..." + MAX_ATTEMPTS=15 + for i in $(seq 1 $MAX_ATTEMPTS); do + if curl --output /dev/null --silent --fail http://localhost:3002/api/health; then + echo "Grafana is ready!" + break + elif [ $i -eq $MAX_ATTEMPTS ]; then + echo "Grafana did not become ready in time, but continuing..." + docker compose -p keep --project-directory . 
-f tests/e2e_tests/docker-compose-modified.yml logs grafana --tail 50 + else + echo "Waiting for Grafana... Attempt $i/$MAX_ATTEMPTS" + sleep 5 + fi + done + + # Give everything a bit more time to stabilize + echo "Giving services additional time to stabilize..." + sleep 10 + + # Debug the environment before running tests + - name: Debug environment + run: | + echo "Checking all container status..." + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml ps + + echo "Network information:" + docker network ls + docker network inspect keep_default || true + + echo "Testing Prometheus API..." + curl -v http://localhost:9090/api/v1/status/config || echo "Prometheus API not responding, but continuing..." + + echo "Testing Grafana API..." + curl -v http://localhost:3002/api/health || echo "Grafana API not responding, but continuing..." + + echo "Test Keep Frontend..." + curl -v http://localhost:3000/ || echo "Keep Frontend not responding, but continuing..." + + echo "Test Keep Frontend with DB Auth..." + curl -v http://localhost:3001/ || echo "Keep Frontend with DB Auth not responding, but continuing..." + + echo "Listing available ports:" + netstat -tuln | grep -E '3000|3001|3002|8080|8081|9090' + + # Run e2e tests + - name: Run e2e tests and report coverage + run: | + echo "Running tests..." + poetry run coverage run --branch -m pytest -v tests/e2e_tests/ -n 4 --dist=loadfile + echo "Tests completed!" + + - name: Convert coverage results to JSON (for CodeCov support) + run: poetry run coverage json --omit="keep/providers/*" + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + with: + fail_ci_if_error: false + files: coverage.json + verbose: true + + # Collect logs + - name: Dump logs + if: always() + run: | + docker compose -p keep --project-directory . 
-f tests/e2e_tests/docker-compose-modified.yml logs keep-backend > backend_logs-${{ inputs.db-type }}.txt + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml logs keep-frontend > frontend_logs-${{ inputs.db-type }}.txt + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml logs keep-backend-db-auth > backend_logs-${{ inputs.db-type }}-db-auth.txt + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml logs keep-frontend-db-auth > frontend_logs-${{ inputs.db-type }}-db-auth.txt + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml logs prometheus-server-for-test-target > prometheus_logs-${{ inputs.db-type }}.txt + docker compose -p keep --project-directory . -f tests/e2e_tests/docker-compose-modified.yml logs grafana > grafana_logs-${{ inputs.db-type }}.txt + continue-on-error: true + + # Upload artifacts + - name: Upload test artifacts on failure + if: always() + uses: actions/upload-artifact@v4.4.3 + with: + name: test-artifacts-db-${{ inputs.db-type }}-redis-${{ inputs.redis_enabled }} + path: | + playwright_dump_*.html + playwright_dump_*.png + playwright_dump_*.txt + playwright_dump_*.json + backend_logs-${{ inputs.db-type }}.txt + frontend_logs-${{ inputs.db-type }}.txt + backend_logs-${{ inputs.db-type }}-db-auth.txt + frontend_logs-${{ inputs.db-type }}-db-auth.txt + prometheus_logs-${{ inputs.db-type }}.txt + grafana_logs-${{ inputs.db-type }}.txt + continue-on-error: true + + # Tear down environment + - name: Tear down environment + if: always() + run: | + docker compose -p keep --project-directory . 
-f tests/e2e_tests/docker-compose-modified.yml down diff --git a/.github/workflows/test-docs.yml b/.github/workflows/test-docs.yml new file mode 100644 index 0000000000..61aad64d57 --- /dev/null +++ b/.github/workflows/test-docs.yml @@ -0,0 +1,70 @@ +name: Test docs +on: + push: + paths: + - 'keep/providers/**' + - 'docs/**' + - 'examples/**' + pull_request: + paths: + - 'keep/providers/**' + - 'docs/**' + - 'examples/**' + workflow_dispatch: +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref }}-${{ github.job }} + cancel-in-progress: true +env: + PYTHON_VERSION: 3.11 + STORAGE_MANAGER_DIRECTORY: /tmp/storage-manager + +jobs: + tests-docs: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + - uses: chartboost/ruff-action@v1 + with: + src: "./keep" + - name: Set up Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + + - name: cache deps + id: cache-deps + uses: actions/cache@v4.2.0 + with: + path: .venv + key: pydeps-${{ hashFiles('**/poetry.lock') }} + + - name: Install dependencies using poetry + run: poetry install --no-interaction --no-root --with dev + + - name: Validate docs/providers/overview.mdx + run: | + cd scripts; + poetry run python ./docs_get_providers_list.py --validate + + - name: Validate snippets for providers + run: | + poetry run python ./scripts/docs_render_provider_snippets.py --validate + + - name: Validate broken links and navigation + run: | + npm i -g mintlify; + + cd docs && mintlify broken-links; + cd ../scripts; + ./docs_validate_navigation.sh; + + # Todo: validate if openapi schema is matching with the code + \ No newline at end of file diff --git a/.github/workflows/test-pr-e2e.yml b/.github/workflows/test-pr-e2e.yml index 9ebaa40d3a..564ca32277 100644 --- a/.github/workflows/test-pr-e2e.yml +++ 
b/.github/workflows/test-pr-e2e.yml @@ -4,12 +4,17 @@ on: workflow_dispatch: pull_request: paths: - - 'keep/**' - - 'keep-ui/**' - - 'tests/**' + - "keep/**" + - "keep-ui/**" + - "tests/**" + +# Add permissions for GitHub Container Registry +permissions: + contents: read + packages: write concurrency: - group: ${{ github.workflow }}-${{ github.head_ref }} + group: ${{ github.event_name }}-${{ github.workflow }}-${{ github.head_ref }} cancel-in-progress: true env: @@ -22,21 +27,21 @@ env: POSTGRES_USER: keepuser POSTGRES_PASSWORD: keeppassword POSTGRES_DB: keepdb + # To test if imports are working properly + EE_ENABLED: true + # Docker Compose project name + COMPOSE_PROJECT_NAME: keep + # Check if PR is from fork (external contributor) + IS_FORK: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} jobs: - tests: + # Prepare test environment in parallel with Docker builds + prepare-test-environment: runs-on: ubuntu-latest - strategy: - matrix: - db_type: [mysql, postgres] steps: - name: Checkout uses: actions/checkout@v3 - - uses: chartboost/ruff-action@v1 - with: - src: "./keep" - - name: Set up Python ${{ env.PYTHON_VERSION }} uses: actions/setup-python@v4 with: @@ -50,7 +55,7 @@ jobs: - name: Cache dependencies id: cache-deps - uses: actions/cache@v2 + uses: actions/cache@v4.2.0 with: path: .venv key: pydeps-${{ hashFiles('**/poetry.lock') }} @@ -58,102 +63,262 @@ jobs: - name: Install dependencies using poetry run: poetry install --no-interaction --no-root --with dev - - name: Install Playwright dependencies - run: npx playwright install --with-deps + - name: Get Playwright version from poetry.lock + id: playwright-version + run: | + PLAYWRIGHT_VERSION=$(grep "playwright" poetry.lock -A 5 | grep "version" | head -n 1 | cut -d'"' -f2) + echo "version=$PLAYWRIGHT_VERSION" >> $GITHUB_OUTPUT + + - name: Cache Playwright browsers + id: playwright-cache + uses: actions/cache@v4.2.0 + with: + path: ~/.cache/ms-playwright + key: 
playwright-${{ steps.playwright-version.outputs.version }} - - name: Install playwright - run: poetry run playwright install + - name: Install Playwright and dependencies + run: | + if [ "${{ steps.playwright-cache.outputs.cache-hit }}" != "true" ]; then + poetry run playwright install --with-deps + else + poetry run playwright install-deps + fi + + # Build images in parallel + build-frontend: + runs-on: ubuntu-latest + outputs: + image_name: ${{ steps.set-image-name.outputs.image_name }} + permissions: + contents: read + packages: write + steps: + - name: Set image name + id: set-image-name + run: | + if [[ "${{ env.IS_FORK }}" == "true" ]]; then + echo "image_name=keep-frontend:local" >> $GITHUB_OUTPUT + else + echo "image_name=ghcr.io/${{ github.repository_owner }}/keep-frontend:${{ github.sha }}" >> $GITHUB_OUTPUT + fi + + - name: Login to GitHub Container Registry + if: ${{ env.IS_FORK != 'true' }} + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Checkout + uses: actions/checkout@v3 - name: Set up Docker Buildx + id: buildx uses: docker/setup-buildx-action@v2 - - name: Set up Keep environment + - name: Set cache key variables + id: cache-keys run: | - DOCKER_BUILDKIT=1 docker-compose \ - --project-directory . 
\ - -f tests/e2e_tests/docker-compose-e2e-${{ matrix.db_type }}.yml up -d + # Create a safe branch name for cache key (replace / with - and remove special chars) + SAFE_BRANCH=$(echo "${{ github.head_ref || github.ref_name }}" | sed 's/\//-/g' | sed 's/[^a-zA-Z0-9._-]//g') + echo "SAFE_BRANCH_NAME=${SAFE_BRANCH}" >> $GITHUB_OUTPUT - - name: Wait for database to be ready + # Create a hash ONLY of the dependencies section of package.json and package-lock.json + # This ensures the hash only changes when dependencies change + DEPS_HASH=$(jq '.dependencies' keep-ui/package.json | sha256sum | cut -d ' ' -f 1) + echo "DEPS_HASH=${DEPS_HASH:0:8}" >> $GITHUB_OUTPUT + + - name: Debug repository and cache info run: | - # Add commands to wait for the database to be ready - if [ "${{ matrix.db_type }}" == "mysql" ]; then - until docker exec $(docker ps -qf "name=keep-database") mysqladmin ping -h "localhost" --silent; do - echo "Waiting for MySQL to be ready..." - sleep 2 - done - elif [ "${{ matrix.db_type }}" == "postgres" ]; then - until docker exec $(docker ps -qf "name=keep-database") pg_isready -h localhost -U keepuser; do - echo "Waiting for Postgres to be ready..." - sleep 2 - done - fi + echo "Repository: ${{ github.repository }}" + echo "Repository owner: ${{ github.repository_owner }}" + echo "Branch: ${{ github.head_ref || github.ref_name }}" + echo "Safe branch name: ${{ steps.cache-keys.outputs.SAFE_BRANCH_NAME }}" + echo "Dependencies hash: ${{ steps.cache-keys.outputs.DEPS_HASH }}" + echo "Is fork: ${{ env.IS_FORK }}" - # wait to keep backend on port 8080 - echo "Waiting for Keep backend to be ready..." - attempt=0 - max_attempts=10 - - until $(curl --output /dev/null --silent --fail http://localhost:8080/healthcheck); do - if [ "$attempt" -ge "$max_attempts" ]; then - echo "Max attempts reached, exiting..." - exit 1 - fi - echo "Waiting for Keep backend to be ready... 
(Attempt: $((attempt+1)))" - attempt=$((attempt+1)) - sleep 2 - done - - echo "Keep backend is ready!" - # wait to the backend - echo "Waiting for Keep frontend to be ready..." - attempt=0 - max_attempts=10 - - until $(curl --output /dev/null --silent --fail http://localhost:3000/); do - if [ "$attempt" -ge "$max_attempts" ]; then - echo "Max attempts reached, exiting..." - exit 1 - fi - echo "Waiting for Keep frontend to be ready... (Attempt: $((attempt+1)))" - attempt=$((attempt+1)) - sleep 2 - done - - # create the state directory - # mkdir -p ./state && chown -R root:root ./state && chmod -R 777 ./state - - name: Run e2e tests and report coverage + # Pre-check if branch cache exists (only for non-forks) + - name: Check if branch cache exists + id: branch-cache-exists + if: ${{ env.IS_FORK != 'true' }} + continue-on-error: true run: | - poetry run coverage run --branch -m pytest -s tests/e2e_tests/ + BRANCH_CACHE_TAG="ghcr.io/${{ github.repository_owner }}/keep-frontend:cache-${{ steps.cache-keys.outputs.SAFE_BRANCH_NAME }}" + if docker buildx imagetools inspect "$BRANCH_CACHE_TAG" &>/dev/null; then + echo "Branch cache exists: $BRANCH_CACHE_TAG" + echo "cache_exists=true" >> $GITHUB_OUTPUT + else + echo "Branch cache does not exist: $BRANCH_CACHE_TAG" + echo "cache_exists=false" >> $GITHUB_OUTPUT + fi - - name: Convert coverage results to JSON (for CodeCov support) - run: poetry run coverage json --omit="keep/providers/*" + - name: Log frontend cache status + if: ${{ env.IS_FORK != 'true' }} + run: | + if [ "${{ steps.branch-cache-exists.outputs.cache_exists }}" == "true" ]; then + echo "FRONTEND CACHE HIT ✅" + echo "Cache tag: ghcr.io/${{ github.repository_owner }}/keep-frontend:cache-${{ steps.cache-keys.outputs.SAFE_BRANCH_NAME }}" + else + echo "FRONTEND CACHE MISS ❌" + echo "Will attempt to use main branch cache and create a new branch cache" + fi - - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@v3 + # For non-forks: Build and 
push to registry + - name: Build and push frontend image with registry cache + if: ${{ env.IS_FORK != 'true' }} + uses: docker/build-push-action@v4 with: - fail_ci_if_error: false # don't fail if we didn't manage to upload the coverage report - files: coverage.json - verbose: true + context: keep-ui + file: ./docker/Dockerfile.ui + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/keep-frontend:${{ github.sha }} + # Use registry-based caching with branch-specific tags + cache-from: | + type=registry,ref=ghcr.io/${{ github.repository_owner }}/keep-frontend:cache-${{ steps.cache-keys.outputs.SAFE_BRANCH_NAME }} + type=registry,ref=ghcr.io/${{ github.repository_owner }}/keep-frontend:cache-${{ steps.cache-keys.outputs.DEPS_HASH }} + type=registry,ref=ghcr.io/${{ github.repository_owner }}/keep-frontend:cache-main + cache-to: | + type=registry,ref=ghcr.io/${{ github.repository_owner }}/keep-frontend:cache-${{ steps.cache-keys.outputs.SAFE_BRANCH_NAME }},mode=max + type=registry,ref=ghcr.io/${{ github.repository_owner }}/keep-frontend:cache-${{ steps.cache-keys.outputs.DEPS_HASH }},mode=max + # Add build args for better caching + build-args: | + BUILDKIT_INLINE_CACHE=1 + # Verbose output + outputs: type=image,push=true - - name: Dump backend logs - if: failure() + build-backend: + runs-on: ubuntu-latest + outputs: + image_name: ${{ steps.set-image-name.outputs.image_name }} + permissions: + contents: read + packages: write + steps: + - name: Set image name + id: set-image-name run: | - docker-compose --project-directory . -f tests/e2e_tests/docker-compose-e2e-${{ matrix.db_type }}.yml logs keep-backend > backend_logs-${{ matrix.db_type }}.txt - docker-compose --project-directory . 
-f tests/e2e_tests/docker-compose-e2e-${{ matrix.db_type }}.yml logs keep-frontend > frontend_logs-${{ matrix.db_type }}.txt - continue-on-error: true + if [[ "${{ env.IS_FORK }}" == "true" ]]; then + echo "image_name=keep-backend:local" >> $GITHUB_OUTPUT + else + echo "image_name=ghcr.io/${{ github.repository_owner }}/keep-backend:${{ github.sha }}" >> $GITHUB_OUTPUT + fi - - name: Upload test artifacts on failure - if: failure() - uses: actions/upload-artifact@v3 + - name: Login to GitHub Container Registry + if: ${{ env.IS_FORK != 'true' }} + uses: docker/login-action@v2 with: - name: test-artifacts - path: | - playwright_dump_*.html - playwright_dump_*.png - backend_logs-${{ matrix.db_type }}.txt - frontend_logs-${{ matrix.db_type }}.txt + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v2 + + - name: Set cache key variables + id: cache-keys + run: | + # Create a safe branch name for cache key (replace / with - and remove special chars) + SAFE_BRANCH=$(echo "${{ github.head_ref || github.ref_name }}" | sed 's/\//-/g' | sed 's/[^a-zA-Z0-9._-]//g') + echo "SAFE_BRANCH_NAME=${SAFE_BRANCH}" >> $GITHUB_OUTPUT + + # Create a hash of poetry files for version-specific caching + DEPS_HASH=$(cat poetry.lock pyproject.toml | sha256sum | cut -d ' ' -f 1) + echo "DEPS_HASH=${DEPS_HASH:0:8}" >> $GITHUB_OUTPUT + + - name: Debug repository and cache info + run: | + echo "Repository: ${{ github.repository }}" + echo "Repository owner: ${{ github.repository_owner }}" + echo "Branch: ${{ github.head_ref || github.ref_name }}" + echo "Safe branch name: ${{ steps.cache-keys.outputs.SAFE_BRANCH_NAME }}" + echo "Dependencies hash: ${{ steps.cache-keys.outputs.DEPS_HASH }}" + echo "Is fork: ${{ env.IS_FORK }}" + + # Pre-check if branch cache exists (only for non-forks) + - name: Check if branch cache exists + id: 
branch-cache-exists + if: ${{ env.IS_FORK != 'true' }} continue-on-error: true + run: | + BRANCH_CACHE_TAG="ghcr.io/${{ github.repository_owner }}/keep-backend:cache-${{ steps.cache-keys.outputs.SAFE_BRANCH_NAME }}" + if docker buildx imagetools inspect "$BRANCH_CACHE_TAG" &>/dev/null; then + echo "Branch cache exists: $BRANCH_CACHE_TAG" + echo "cache_exists=true" >> $GITHUB_OUTPUT + else + echo "Branch cache does not exist: $BRANCH_CACHE_TAG" + echo "cache_exists=false" >> $GITHUB_OUTPUT + fi - - name: Tear down environment + - name: Log backend cache status + if: ${{ env.IS_FORK != 'true' }} run: | - docker-compose --project-directory . -f tests/e2e_tests/docker-compose-e2e-${{ matrix.db_type }}.yml down + if [ "${{ steps.branch-cache-exists.outputs.cache_exists }}" == "true" ]; then + echo "BACKEND CACHE HIT ✅" + echo "Cache tag: ghcr.io/${{ github.repository_owner }}/keep-backend:cache-${{ steps.cache-keys.outputs.SAFE_BRANCH_NAME }}" + else + echo "BACKEND CACHE MISS ❌" + echo "Will attempt to use main branch cache and create a new branch cache" + fi + + # For non-forks: Build and push to registry + - name: Build and push backend image with registry cache + if: ${{ env.IS_FORK != 'true' }} + uses: docker/build-push-action@v4 + with: + context: . 
+ file: ./docker/Dockerfile.api + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/keep-backend:${{ github.sha }} + # Use registry-based caching with branch-specific tags + cache-from: | + type=registry,ref=ghcr.io/${{ github.repository_owner }}/keep-backend:cache-${{ steps.cache-keys.outputs.DEPS_HASH }} + type=registry,ref=ghcr.io/${{ github.repository_owner }}/keep-backend:cache-${{ steps.cache-keys.outputs.SAFE_BRANCH_NAME }} + cache-to: | + type=registry,ref=ghcr.io/${{ github.repository_owner }}/keep-backend:cache-${{ steps.cache-keys.outputs.DEPS_HASH }},mode=max + type=registry,ref=ghcr.io/${{ github.repository_owner }}/keep-backend:cache-${{ steps.cache-keys.outputs.SAFE_BRANCH_NAME }},mode=max + # Add build args for better caching + build-args: | + BUILDKIT_INLINE_CACHE=1 + # Verbose output + outputs: type=image,push=true + + # Run tests with all services in one job + run-mysql-with-redis: + needs: [build-frontend, build-backend, prepare-test-environment] + uses: ./.github/workflows/run-e2e-tests.yml + with: + db-type: mysql + redis_enabled: true + python-version: 3.11 + is-fork: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} + backend-image-name: ${{ needs.build-backend.outputs.image_name }} + frontend-image-name: ${{ needs.build-frontend.outputs.image_name }} + + run-postgresql-without-redis: + needs: [build-frontend, build-backend, prepare-test-environment] + uses: ./.github/workflows/run-e2e-tests.yml + with: + db-type: postgres + redis_enabled: false + python-version: 3.11 + is-fork: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} + backend-image-name: ${{ needs.build-backend.outputs.image_name }} + frontend-image-name: ${{ needs.build-frontend.outputs.image_name }} + + run-sqlite-without-redis: + needs: [build-frontend, build-backend, prepare-test-environment] + uses: ./.github/workflows/run-e2e-tests.yml + with: + db-type: sqlite + redis_enabled: false + 
python-version: 3.11 + is-fork: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork }} + backend-image-name: ${{ needs.build-backend.outputs.image_name }} + frontend-image-name: ${{ needs.build-frontend.outputs.image_name }} \ No newline at end of file diff --git a/.github/workflows/test-pr-integrations.yml b/.github/workflows/test-pr-integrations.yml new file mode 100644 index 0000000000..f629a6bdce --- /dev/null +++ b/.github/workflows/test-pr-integrations.yml @@ -0,0 +1,112 @@ +name: Integration Tests +on: + push: + branches: + - main + paths: + - "keep/**" + - "tests/**" + pull_request: + paths: + - "keep/**" + - "tests/**" + workflow_dispatch: + +permissions: + actions: write + +concurrency: + group: ${{ github.event_name }}-${{ github.workflow }}-${{ github.head_ref }} + cancel-in-progress: true + +env: + PYTHON_VERSION: 3.11 + STORAGE_MANAGER_DIRECTORY: /tmp/storage-manager + MYSQL_ROOT_PASSWORD: keep + MYSQL_DATABASE: keep + ELASTIC_PASSWORD: keeptests + +jobs: + integration-tests: + runs-on: ubuntu-latest + services: + mysql: + image: mysql:5.7 + env: + MYSQL_ROOT_PASSWORD: ${{ env.MYSQL_ROOT_PASSWORD }} + MYSQL_DATABASE: ${{ env.MYSQL_DATABASE }} + ports: + - 3306:3306 + options: >- + --health-cmd="mysqladmin ping" + --health-interval=10s + --health-timeout=5s + --health-retries=3 + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.4 + ports: + - 9200:9200 + env: + ELASTIC_PASSWORD: ${{ env.ELASTIC_PASSWORD }} + bootstrap.memory_lock: "true" + discovery.type: "single-node" + ES_JAVA_OPTS: "-Xms2g -Xmx2g" + xpack.security.enabled: "true" + keycloak: + image: us-central1-docker.pkg.dev/keephq/keep/keep-keycloak-test + env: + KC_DB: dev-mem + KC_HTTP_RELATIVE_PATH: /auth + KEYCLOAK_ADMIN: keep_kc_admin + KEYCLOAK_ADMIN_PASSWORD: keep_kc_admin + ports: + - 8787:8080 + options: >- + --health-cmd="/opt/keycloak/bin/kcadm.sh config credentials --server http://localhost:8080/auth --realm master --user 
keep_kc_admin --password keep_kc_admin || exit 1" + --health-interval=10s + --health-timeout=5s + --health-retries=4 + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + + - name: cache deps + id: cache-deps + uses: actions/cache@v4.2.0 + with: + path: .venv + key: pydeps-${{ hashFiles('**/poetry.lock') }} + + - name: Install dependencies using poetry + run: poetry install --no-interaction --no-root --with dev + + - name: Run integration tests and report coverage + run: | + until nc -z 127.0.0.1 3306; do + echo "waiting for MySQL..." + sleep 1 + done + echo "MySQL is up and running!" + poetry run coverage run --omit="*/test*" --branch -m pytest --integration --ignore=tests/e2e_tests/ + + - name: Convert coverage results to JSON + run: poetry run coverage json --omit="keep/providers/*" + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + with: + fail_ci_if_error: false + files: coverage.json + verbose: true diff --git a/.github/workflows/test-pr-ut-ui.yml b/.github/workflows/test-pr-ut-ui.yml new file mode 100644 index 0000000000..3e21a6a4bd --- /dev/null +++ b/.github/workflows/test-pr-ut-ui.yml @@ -0,0 +1,54 @@ +name: Frontend Tests +on: + push: + branches: + - main + paths: + - "keep-ui/**" + pull_request: + paths: + - "keep-ui/**" + workflow_dispatch: + +permissions: + actions: write + +concurrency: + group: ${{ github.event_name }}-${{ github.workflow }}-${{ github.head_ref }} + cancel-in-progress: true + +env: + NODE_VERSION: 20 + +jobs: + frontend-tests: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up Node.js ${{ env.NODE_VERSION }} + uses: actions/setup-node@v3 + with: + node-version: ${{ env.NODE_VERSION }} + 
cache: 'npm' + cache-dependency-path: keep-ui/package-lock.json + + - name: Install dependencies + working-directory: keep-ui + run: npm ci + + - name: Run frontend tests + working-directory: keep-ui + run: npm run test + + # Optional: Add coverage reporting if your test setup supports it + # Uncomment and adjust if you have coverage reporting configured + # - name: Upload coverage reports to Codecov + # uses: codecov/codecov-action@v3 + # with: + # fail_ci_if_error: false + # directory: keep-ui/coverage + # flags: frontend + # verbose: true \ No newline at end of file diff --git a/.github/workflows/test-pr-ut.yml b/.github/workflows/test-pr-ut.yml new file mode 100644 index 0000000000..3ebad25949 --- /dev/null +++ b/.github/workflows/test-pr-ut.yml @@ -0,0 +1,71 @@ +name: Unit Tests +on: + push: + branches: + - main + paths: + - "keep/**" + - "tests/**" + pull_request: + paths: + - "keep/**" + - "tests/**" + workflow_dispatch: + +permissions: + actions: write + +concurrency: + group: ${{ github.event_name }}-${{ github.workflow }}-${{ github.head_ref }} + cancel-in-progress: true + +env: + PYTHON_VERSION: 3.11 + SQLALCHEMY_WARN_20: 1 + +jobs: + unit-tests: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - uses: chartboost/ruff-action@v1 + with: + src: "./keep" + + - name: Set up Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + + - name: cache deps + id: cache-deps + uses: actions/cache@v4.2.0 + with: + path: .venv + key: pydeps-${{ hashFiles('**/poetry.lock') }} + + - name: Install dependencies using poetry + run: poetry install --no-interaction --no-root --with dev + + - name: Run unit tests and report coverage + run: | + poetry run coverage run --omit="*/test*" --branch -m pytest --timeout 20 -n auto --non-integration 
--ignore=tests/e2e_tests/ + + - name: Convert coverage results to JSON + run: poetry run coverage json --omit="keep/providers/*" + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + with: + fail_ci_if_error: false + files: coverage.json + verbose: true diff --git a/.github/workflows/test-pr.yml b/.github/workflows/test-pr.yml deleted file mode 100644 index 16cbb964af..0000000000 --- a/.github/workflows/test-pr.yml +++ /dev/null @@ -1,90 +0,0 @@ -name: Tests -on: - push: - paths: - - 'keep/**' - pull_request: - paths: - - 'keep/**' - workflow_dispatch: -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref }} - cancel-in-progress: true -# MySQL server and Elasticsearch for testing -env: - PYTHON_VERSION: 3.11 - STORAGE_MANAGER_DIRECTORY: /tmp/storage-manager - MYSQL_ROOT_PASSWORD: keep - MYSQL_DATABASE: keep - ELASTIC_PASSWORD: keeptests - -jobs: - tests: - runs-on: ubuntu-latest - services: - mysql: - image: mysql:5.7 - env: - MYSQL_ROOT_PASSWORD: ${{ env.MYSQL_ROOT_PASSWORD }} - MYSQL_DATABASE: ${{ env.MYSQL_DATABASE }} - ports: - - 3306:3306 - options: >- - --health-cmd="mysqladmin ping" - --health-interval=10s - --health-timeout=5s - --health-retries=3 - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.4 - ports: - - 9200:9200 - env: - ELASTIC_PASSWORD: ${{ env.ELASTIC_PASSWORD }} - bootstrap.memory_lock: "true" - discovery.type: "single-node" - ES_JAVA_OPTS: "-Xms2g -Xmx2g" - xpack.security.enabled: "true" - - - steps: - - name: Checkout - uses: actions/checkout@v3 - - uses: chartboost/ruff-action@v1 - with: - src: "./keep" - - name: Set up Python ${{ env.PYTHON_VERSION }} - uses: actions/setup-python@v4 - with: - python-version: ${{ env.PYTHON_VERSION }} - - name: Install Poetry - uses: snok/install-poetry@v1 - with: - virtualenvs-create: true - virtualenvs-in-project: true - - name: cache deps - id: cache-deps - uses: actions/cache@v2 - with: - path: .venv - key: pydeps-${{ 
hashFiles('**/poetry.lock') }} - - name: Install dependencies using poetry - run: poetry install --no-interaction --no-root - - - name: Run unit tests and report coverage - run: | - # Add a step to wait for MySQL to be fully up and running - until nc -z 127.0.0.1 3306; do - echo "waiting for MySQL..." - sleep 1 - done - echo "MySQL is up and running!" - poetry run coverage run --branch -m pytest --ignore=tests/e2e_tests/ - - - name: Convert coverage results to JSON (for CodeCov support) - run: poetry run coverage json --omit="keep/providers/*" - - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@v3 - with: - fail_ci_if_error: false # don't fail if we didn't manage to upload the coverage report - files: coverage.json - verbose: true diff --git a/.github/workflows/test-workflow-examples.yml b/.github/workflows/test-workflow-examples.yml new file mode 100644 index 0000000000..c8754a8188 --- /dev/null +++ b/.github/workflows/test-workflow-examples.yml @@ -0,0 +1,75 @@ +name: Test workflow examples +on: + push: + paths: + - 'keep/providers/**' + - 'examples/workflows/**' + - 'keep-ui/entities/workflows/model/yaml.schema.ts' + - 'keep-ui/scripts/validate-workflow-examples.ts' + pull_request: + paths: + - 'keep/providers/**' + - 'examples/workflows/**' + - 'keep-ui/entities/workflows/model/yaml.schema.ts' + - 'keep-ui/scripts/validate-workflow-examples.ts' + workflow_dispatch: +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref }}-${{ github.job }} + cancel-in-progress: true +env: + NODE_VERSION: 20 + PYTHON_VERSION: 3.11 + STORAGE_MANAGER_DIRECTORY: /tmp/storage-manager + +jobs: + test-workflow-examples: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - uses: chartboost/ruff-action@v1 + with: + src: "./keep" + + - name: Set up Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install Poetry + uses: 
snok/install-poetry@v1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + + - name: cache deps + id: cache-deps + uses: actions/cache@v4.2.0 + with: + path: .venv + key: pydeps-${{ hashFiles('**/poetry.lock') }} + + - name: Install dependencies using poetry + run: poetry install --no-interaction --no-root --with dev + + # Save list of providers to providers_list.json, because we don't have backend endpoint to get it + - name: Save providers list + run: | + PYTHONPATH="${{ github.workspace }}" poetry run python ./scripts/save_providers_list.py + + - name: Set up Node.js ${{ env.NODE_VERSION }} + uses: actions/setup-node@v3 + with: + node-version: ${{ env.NODE_VERSION }} + cache: 'npm' + cache-dependency-path: keep-ui/package-lock.json + + - name: Install dependencies + working-directory: keep-ui + run: npm ci + + - name: Run workflow examples validation + working-directory: keep-ui + run: npm run test:workflow-examples diff --git a/.gitignore b/.gitignore index a4c1a85316..c59aa1db94 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,9 @@ __pycache__/ # C extensions *.so +# .csv files +*.csv + # Distribution / packaging .Python build/ @@ -186,6 +189,7 @@ keep-ui/node_modules/* cov.xml keep.db +keepdd.db RANDOM_USER_ID storage @@ -195,8 +199,31 @@ tempo-data/ # docs docs/node_modules/ +oauth2.cfg + scripts/automatic_extraction_rules.py playwright_dump_*.html playwright_dump_*.png +playwright_dump_*.txt +playwright_dump_*.json + +ee/experimental/ai_temp/* +,e!ee/experimental/ai_temp/.gitkeep + +oauth2.cfg +scripts/keep_slack_bot.py +*.db +providers_cache.json +providers_list.json +workflow-yaml-json-schema.json + +tests/provision/* +!tests/provision/workflows* +grafana/* +!grafana/provisioning/ +!grafana/dashboards/ +keep/providers/grafana_provider/grafana/png/* +topology.sh +posthog.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ec7b221de9..2fc9e4e62c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ 
-7,12 +7,6 @@ repos: language: system types: [python] require_serial: true - # - id: yamllint - # name: yamllint - # description: This hook runs yamllint. - # entry: yamllint - # language: python - # types: [file, yaml] - id: end-of-file-fixer name: Fix End of Files entry: end-of-file-fixer @@ -38,10 +32,17 @@ repos: hooks: # Run the linter. - id: ruff - args: [ --fix ] + args: [--fix] - repo: https://github.com/compilerla/conventional-pre-commit rev: v2.1.1 hooks: - id: conventional-pre-commit stages: [commit-msg] args: [] # optional: list of Conventional Commits types to allow e.g. [feat, fix, ci, chore, test] + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v3.0.3 + hooks: + - id: prettier + types_or: + [javascript, jsx, ts, tsx, json, yaml, css, scss, html, markdown] + args: [--write] diff --git a/README.md b/README.md index 40fdd2d16d..5b44b1df93 100644 --- a/README.md +++ b/README.md @@ -2,264 +2,910 @@ -

The open-source alert management and AIOps platform

+

The open-source AIOps and alert management platform

-
Single pane of glass, filtering, bi-directional integrations, alert correlation, workflows, enrichment, dashboards. -
AI correlation and AI summarization are under the limited preview (Book a Demo)

+
Single pane of glass, alert deduplication, enrichment, filtering and correlation, bi-directional integrations, workflows, dashboards. +
+
+ +

- Why Keep? - · - Getting started - · - Supported tools and integrations - · Docs · Try it out · - Website - · Report Bug · - Slack Community + Book a Demo + · + Website

+
+ Sneak preview screenshot +
-## How does it work? -1. **Connect your tools**: Connect everything from monitoring platforms to databases and ticketing systems. -
+

-| Connect providers | Receive alerts | -|----------|----------| -| | | +- 🔍 **Single pane of glass** - Best-in-class customizable UI for all your alerts and incidents +- 🛠️ **Swiss Army Knife for alerts** - Deduplication, correlation, filtering and enrichment +- 🔄 **Deep integrations** - Bi-directional syncs with monitoring tools, customizable workflows +- ⚡ **[Automation](#workflows)** - GitHub Actions for your monitoring tools +- 🤖 **AIOps 2.0** - AI-powered correlation and summarization -
+
-2. **Set up Workflows**: Initiate automated workflows in response to alerts or based on custom intervals. +> See full [platform documentation](https://docs.keephq.dev). -
+
+## Supported Integrations + +> View the full list in our [documentation](https://docs.keephq.dev/providers/documentation) + +> Missing a provider? [Submit a new provider request](https://github.com/keephq/keep/issues/new?assignees=&labels=provider&projects=&template=new_provider_request.md&title=) and we'll add it quickly! + +### AI Backends for Enrichments, Correlations and Incident Context Gathering + + + + + + + + + + + + + +
+ + Anthropic
+ Anthropic +
+
+ + OpenAI
+ OpenAI +
+
+ + DeepSeek
+ DeepSeek +
+
+ + Ollama
+ Ollama +
+
+ + LlamaCPP
+ LlamaCPP +
+
+ + Grok
+ Grok +
+
+ + Gemini
+ Gemini +
+
+ +### Observability Tools + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + AppDynamics
+ AppDynamics +
+
+ + Axiom
+ Axiom +
+
+ + Azure Monitoring
+ Azure Monitoring +
+
+ + Centreon
+ Centreon +
+
+ + Checkmk
+ Checkmk +
+
+ + Cilium
+ Cilium +
+
+ + Checkly
+ Checkly +
+
+ + CloudWatch
+ CloudWatch +
+
+ + Coralogix
+ Coralogix +
+
+ + Dash0
+ Dash0 +
+
+ + Datadog
+ Datadog +
+
+ + Dynatrace
+ Dynatrace +
+
+ + Elastic
+ Elastic +
+
+ + GCP Monitoring
+ GCP Monitoring +
+
+ + Grafana
+ Grafana +
+
+ + Grafana Loki
+ Grafana Loki +
+
+ + Graylog
+ Graylog +
+
+ + Icinga2 +
+ Icinga2 +
+
+ + Kibana
+ Kibana +
+
+ + LibreNMS
+ LibreNMS +
+
+ + NetBox
+ NetBox +
+
+ + Netdata
+ Netdata +
+
+ + New Relic
+ New Relic +
+
+ + OpenSearch Serverless
+ OpenSearch Serverless +
+
+ + Parseable
+ Parseable +
+
+ + Pingdom
+ Pingdom +
+
+ + Prometheus
+ Prometheus +
+
+ + Rollbar
+ Rollbar +
+
+ + Sentry
+ Sentry +
+
+ + SignalFX
+ SignalFX +
+
+ + OpenObserve
+ OpenObserve +
+
+ + Site24x7
+ Site24x7 +
+
+ + Splunk
+ Splunk +
+
+ + StatusCake
+ StatusCake +
+
+ + SumoLogic
+ SumoLogic +
+
+ + SumoLogic
+ ThousandEyes +
+
+ + UptimeKuma
+ UptimeKuma +
+
+ + VictoriaLogs
+ VictoriaLogs +
+
+ + VictoriaMetrics
+ VictoriaMetrics +
+
+ + Wazuh
+ Wazuh +
+
+ + Zabbix
+ Zabbix +
+
+ +### Databases & Data Warehouses + + + + + + + + + + + + + +
+ + BigQuery
+ BigQuery +
+
+ + ClickHouse
+ ClickHouse +
+
+ + Databend
+ Databend +
+
+ + MongoDB
+ MongoDB +
+
+ + MySQL
+ MySQL +
+
+ + PostgreSQL
+ PostgreSQL +
+
+ + Snowflake
+ Snowflake +
+
+ +### Communication Platforms + + + + + + + + + + + + + + + + + + + + + + + +
+ + Discord
+ Discord +
+
+ + Google Chat
+ Google Chat +
+
+ + Mailgun
+ Mailgun +
+
+ + Mattermost
+ Mattermost +
+
+ + Ntfy.sh
+ Ntfy.sh +
+
+ + Pushover
+ Pushover +
+
+ + Resend
+ Resend +
+
+ + SendGrid
+ SendGrid +
+
+ + Slack
+ Slack +
+
+ + SMTP
+ SMTP +
+
+ + Telegram
+ Telegram +
+
+ + Twilio
+ Twilio +
+
+ + Teams
+ Teams +
+
+ + Zoom
+ Zoom +
+
+ + Zoom Chat
+ Zoom Chat +
+
+ +### Incident Management + + + + + + + + + + + + + + + + + + +
+ + Grafana Incident
+ Grafana Incident +
+
+ + Grafana OnCall
+ Grafana OnCall +
+
+ + Ilert
+ Ilert +
+
+ + Incident.io
+ Incident.io +
+
+ + AWS Incident Manager
+ AWS Incident Manager +
+
+ + OpsGenie
+ OpsGenie +
+
+ + PagerDuty
+ PagerDuty +
+
+ + Pagertree
+ Pagertree +
+
+ + SINGL4
+ SINGL4 +
+
+ + Squadcast
+ Squadcast +
+
+ + Zenduty
+ Zenduty +
+
+ + Flashduty
+ Flashduty +
+
+ +### Ticketing Tools + + + + + + + + + + + + + + + + + + +
+ + Asana
+ Asana +
+
+ + GitHub
+ GitHub +
+
+ + GitLab
+ GitLab +
+
+ + Jira
+ Jira +
+
+ + Linear
+ Linear +
+
+ + LinearB
+ LinearB +
+
+ + Microsoft Planner
+ Microsoft Planner +
+
+ + Monday
+ Monday +
+
+ + Redmine
+ Redmine +
+
+ + ServiceNow
+ ServiceNow +
+
+ + Trello
+ Trello +
+
+ + YouTrack
+ YouTrack +
+
+ +### Container Orchestration Platforms + + + + + + + + + + +
+ + Azure AKS
+ Azure AKS +
+
+ + ArgoCD
+ ArgoCD +
+
+ + Flux CD
+ Flux +
+
+ + GKE
+ GKE +
+
+ + Kubernetes
+ Kubernetes +
+
+ + OpenShift
+ OpenShift +
+
+ +### Data Enrichment + + + + + + + + + + +
+ + Bash
+ Bash +
+
+ + OpenAI
+ OpenAI +
+
+ + Python
+ Python +
+
+ + QuickChart
+ QuickChart +
+
+ + SSH
+ SSH +
+
+ + Webhook
+ Webhook +
+
+ +### Workflow Orchestration + + + + + +
+ + Airflow
+ Airflow +
+
+ +### Queues + + + + + + +
+ + AmazonSQS
+ Amazon SQS +
+
+ + Kafka
+ Kafka +
+
-| Create and upload workflows | -|----------| -| | +## Workflows -
+Keep is GitHub Actions for your monitoring tools. -3. **Operational efficiency**: Automate your alert handling to focus your team's efforts on what really matters. +A Keep Workflow is a declarative YAML file that automates your alert and incident management. Each workflow consists of: +- **Triggers** - What starts the workflow (alerts, incidents, schedule or manual) +- **Steps** - Read or fetch data (enrichment, context) +- **Actions** - Execute operations (update tickets, send notifications, restart servers) -## Why Keep? -1. **Centralized dashboard**: Manage all your alerts across different platforms in a single interface. -2. **Noise reduction**: Deduplicate and correlate alerts to reduce alert fatigue. -3. **Automation**: Trigger workflows for alert enrichment and response. -4. **Developer-first**: Keep is API-first and lets you manage your workflows as code. -5. **Works with every tool**: Plenty of [supported providers](#supported-providers) and more to come. +Here's a simple workflow that creates a Jira ticket for every `critical` alert from `sentry` for `payments` and `ftp` services. +For more workflows, see [here](https://github.com/keephq/keep/tree/main/examples/workflows). -## Workflows -The easiest way of thinking about Workflow in Keep is GitHub Actions. 
At its core, a Workflow in Keep is a declarative YAML file, composed of triggers, steps, and actions and serves to manage, enrich, and automate responses to alerts: ```yaml workflow: - id: most-basic-keep-workflow - description: send a slack message when a cloudwatch alarm is triggered - # workflow triggers - supports alerts, interval, and manual triggers + id: sentry-alerts + description: create ticket alerts for critical alerts from sentry triggers: - type: alert + # customize the filter to run only on critical alert from sentry filters: - key: source - value: cloudwatch - - type: manual - # list of steps that can add context to your alert - steps: - - name: enrich-alert-with-more-data-from-a-database - provider: - type: bigquery - config: "{{ providers.bigquery-prod }}" - with: - query: "SELECT customer_id, customer_type as date FROM `customers_prod` LIMIT 1" - # list of actions that can automate response and do things with your alert + value: sentry + - key: severity + value: critical + # regex to match specific services + - key: service + value: r"(payments|ftp)" actions: - - name: trigger-slack + - name: send-slack-message-team-payments + # if the alert is on the payments service, slack the payments team + if: "'{{ alert.service }}' == 'payments'" provider: type: slack - config: " {{ providers.slack-prod }} " + # control which Slack configuration you want to use + config: " {{ providers.team-payments-slack }} " + # customize the alert message with context from {{ alert }} or any other {{ step }} with: - message: "Got alarm from aws cloudwatch! 
{{ alert.name }}" + message: | + "A new alert from Sentry: Alert: {{ alert.name }} - {{ alert.description }} + {{ alert}}" + - name: create-jira-ticket-oncall-board + # control the workflow flow with "if" and "foreach" statements + if: "'{{ alert.service }}' == 'ftp' and not '{{ alert.ticket_id }}'" + provider: + type: jira + config: " {{ providers.jira }} " + with: + board_name: "Oncall Board" + custom_fields: + customfield_10201: "Critical" + issuetype: "Task" + # customize the summary + summary: "{{ alert.name }} - {{ alert.description }} (created by Keep)" + description: | + "This ticket was created by Keep. + Please check the alert details below: + {code:json} {{ alert }} {code}" + # enrich the alerts with more context. from now on, the alert will be assigned with the ticket id, type and url + enrich_alert: + - key: ticket_type + value: jira + - key: ticket_id + value: results.issue.key + - key: ticket_url + value: results.ticket_url ``` -Workflow triggers can either be executed manually when an alert is activated or run at predefined intervals. More examples can be found [here](https://github.com/keephq/keep/tree/main/examples/workflows). -## Supported Providers -> Missing any? Just submit a [new provider issue](https://github.com/keephq/keep/issues/new?assignees=&labels=provider&projects=&template=new_provider_request.md&title=) and we will add it in the blink of an eye. +## Enterprise Ready -

Observability tools

-

- -            - -            - -            - -            - -            - -            - -            - -            - -

-

- -            - -            - -            - -            - -            - -            - -

-

Databases and data warehouses

-

- -            - -            - -            - -

-

Communication platforms

-

- -            - -            - -            - -            - -            - -            - -            - -            - -            - -

-

Incident Management tools

-

- -            - -            - -            - -            - -            - -            - -            - -            - -            - -            - -            - -            - -            - -            - -

-

Ticketing tools

-

- -            - -            - -            - -            - -            - -

-

Container Orchestration platforms

-

- -            - -

+- **Developer First** - Modern REST APIs, native SDKs, and comprehensive documentation for seamless integration +- **[Enterprise Security](https://docs.keephq.dev/deployment/authentication/overview)** - Full authentication support (SSO, SAML, OIDC, LDAP) with granular access control (RBAC, ABAC) and team management +- **Flexible Deployment** - Deploy on-premises or in air-gapped environments with cloud-agnostic architecture +- **[Production Scale](https://docs.keephq.dev/deployment/stress-testing)** - High availability, performance-tested infrastructure supporting horizontal scaling for enterprise workloads ## Getting Started -### Overview -Keep composed of three main components: -1. [Keep UI](https://github.com/keephq/keep/tree/main/keep-ui) - A NextJS app to connect your providers, centralize alerts and create the workflows. -2. [Keep Backend](https://github.com/keephq/keep/tree/main/keep) - A FastAPI server that implements the business logic behind Keep, including integrating with the tools, working with alerts and scheduling and running the workflows. -3. [Keep CLI](https://github.com/keephq/keep/blob/main/keep/cli/cli.py) - A CLI that lets you control and manage Keep via CLI. - ->**Disclaimer**: we use [PostHog](https://posthog.com/faq) to collect anonymous telemetries to better learn how users use Keep (masked screen recordings for CLI commands) -To turn PostHog off, set the `DISABLE_POSTHOG=true` environment variable and remove the `NEXT_PUBLIC_POSTHOG_KEY` environment variable. - -### Quickstart -#### Spinning up Keep with docker-compose -The easiest way to start with Keep is to run it via docker-compose: -```shell -curl https://raw.githubusercontent.com/keephq/keep/main/start.sh | sh -``` -The UI is now available at http://localhost:3000 and the backend is available at http://localhost:8080. -#### Spinning up Keep with Helm on Kubernetes/Openshift -To install Keep to your Kubernetes ease free with Helm, run the following commands: +> Need help? 
Can't find your environment listed? Reach out on Slack and we'll help you quickly. -```shell -helm repo add keephq https://keephq.github.io/helm-charts -helm pull keephq/keep -helm install keep keephq/keep -``` - -More information about the Helm chart can be found [here](https://github.com/keephq/helm-charts). - -#### Local development -You can also start Keep within your favorite IDE, e.g. [VSCode](https://docs.keephq.dev/development/getting-started#vscode) +Keep can run in various environments and configurations. The easiest way to start is with Keep's Docker Compose. -#### Wanna get Keep up and running in production? Go through our detailed [development guide](https://docs.keephq.dev/development) +- Running Keep [locally](https://docs.keephq.dev/development/getting-started). +- Running Keep on [Kubernetes](https://docs.keephq.dev/deployment/kubernetes/installation). +- Running Keep with [Docker](https://docs.keephq.dev/deployment/docker). +- Running Keep on [AWS ECS](https://docs.keephq.dev/deployment/ecs). +- Running Keep on [OpenShift](https://docs.keephq.dev/deployment/kubernetes/openshift). ## 🫵 Keepers ### Top Contributors + A special thanks to our top contributors who help us make Keep great. You are more than awesome! - [Furkan](https://github.com/pehlicd) @@ -268,6 +914,7 @@ A special thanks to our top contributors who help us make Keep great. You are mo Want to become a top contributor? Join our Slack and DM Tal, Shahar, or Furkan. 
### Contributors + Thank you for contributing and continuously making Keep better, you're awesome 🫶 diff --git a/assets/connect_providers.gif b/assets/connect_providers.gif deleted file mode 100644 index 9c1069da4c..0000000000 Binary files a/assets/connect_providers.gif and /dev/null differ diff --git a/assets/sneaknew.png b/assets/sneaknew.png new file mode 100644 index 0000000000..022e0a7625 Binary files /dev/null and b/assets/sneaknew.png differ diff --git a/assets/upload_workflow.gif b/assets/upload_workflow.gif deleted file mode 100644 index 2504e743d7..0000000000 Binary files a/assets/upload_workflow.gif and /dev/null differ diff --git a/assets/view_alerts.gif b/assets/view_alerts.gif deleted file mode 100644 index 21b93fe9d4..0000000000 Binary files a/assets/view_alerts.gif and /dev/null differ diff --git a/docker-compose-with-auth.yml b/docker-compose-with-auth.yml index acdf8458b7..6d9d8be528 100644 --- a/docker-compose-with-auth.yml +++ b/docker-compose-with-auth.yml @@ -5,7 +5,8 @@ services: service: keep-frontend-common image: us-central1-docker.pkg.dev/keephq/keep/keep-ui environment: - - AUTH_TYPE=SINGLE_TENANT + - AUTH_TYPE=DB + - NEXTAUTH_SECRET=verysecretkey - API_URL=http://keep-backend:8080 volumes: - ./state:/state @@ -18,7 +19,7 @@ services: service: keep-backend-common image: us-central1-docker.pkg.dev/keephq/keep/keep-api environment: - - AUTH_TYPE=SINGLE_TENANT + - AUTH_TYPE=DB - KEEP_JWT_SECRET=verysecretkey - KEEP_DEFAULT_USERNAME=keep - KEEP_DEFAULT_PASSWORD=keep diff --git a/docker-compose.common.yml b/docker-compose.common.yml index bec91fb877..b2f8d44fcc 100644 --- a/docker-compose.common.yml +++ b/docker-compose.common.yml @@ -7,11 +7,11 @@ services: - NEXTAUTH_URL=http://localhost:3000 - NEXT_PUBLIC_API_URL=http://localhost:8080 - POSTHOG_KEY=phc_muk9qE3TfZsX3SZ9XxX52kCGJBclrjhkP9JxAQcm1PZ - - POSTHOG_HOST=https://app.posthog.com + - POSTHOG_HOST=https://ingest.keephq.dev + - 
NEXT_PUBLIC_SENTRY_DSN=https://0d4d59e3105ffe8afa27dcb95a222009@o4505515398922240.ingest.us.sentry.io/4508258058764288 - PUSHER_HOST=localhost - PUSHER_PORT=6001 - PUSHER_APP_KEY=keepappkey - - NEXT_PUBLIC_KEEP_VERSION=0.2.9 keep-backend-common: ports: diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml index 2d2da6fcb7..40e727b488 100644 --- a/docker-compose.dev.yml +++ b/docker-compose.dev.yml @@ -5,6 +5,7 @@ services: service: keep-frontend-common environment: - API_URL=http://keep-backend-dev:8080 + - SENTRY_DISABLED=true build: dockerfile: docker/Dockerfile.dev.ui volumes: diff --git a/docker-compose.yml b/docker-compose.yml index 68291e6b6e..8358c4edc3 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -19,6 +19,8 @@ services: image: us-central1-docker.pkg.dev/keephq/keep/keep-api environment: - AUTH_TYPE=NO_AUTH + - PROMETHEUS_MULTIPROC_DIR=/tmp/prometheus + - KEEP_METRICS=true volumes: - ./state:/state @@ -26,3 +28,33 @@ services: extends: file: docker-compose.common.yml service: keep-websocket-server-common + + grafana: + image: grafana/grafana:latest + profiles: + - grafana + ports: + - "3001:3000" + volumes: + - ./grafana:/var/lib/grafana + - ./grafana/provisioning:/etc/grafana/provisioning + - ./grafana/dashboards:/etc/grafana/dashboards + environment: + - GF_SECURITY_ADMIN_USER=admin + - GF_SECURITY_ADMIN_PASSWORD=admin + - GF_USERS_ALLOW_SIGN_UP=false + depends_on: + - prometheus + + prometheus: + image: prom/prometheus:latest + profiles: + - grafana + ports: + - "9090:9090" + volumes: + - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml + command: + - "--config.file=/etc/prometheus/prometheus.yml" + depends_on: + - keep-backend diff --git a/docker/Dockerfile.api b/docker/Dockerfile.api index 4888774618..53c9b5d536 100644 --- a/docker/Dockerfile.api +++ b/docker/Dockerfile.api @@ -1,14 +1,39 @@ -FROM python:3.11.6-slim as base +FROM python:3.13.5-alpine as base + +# Install bash and runtime dependencies for grpc +RUN apk 
add --no-cache bash libstdc++ ENV PYTHONFAULTHANDLER=1 \ PYTHONHASHSEED=random \ PYTHONUNBUFFERED=1 -RUN useradd --user-group --system --create-home --no-log-init keep +# THIS IS FOR DEBUGGING PURPOSES +# RUN apt-get update && \ +# apt-get install -y --no-install-recommends \ +# iproute2 \ +# net-tools \ +# procps && \ +# rm -rf /var/lib/apt/lists/* + +RUN addgroup -g 1000 keep && \ + adduser -u 1000 -G keep -s /bin/sh -D keep WORKDIR /app FROM base as builder +# Install build dependencies for Alpine +RUN apk add --no-cache \ + gcc \ + g++ \ + musl-dev \ + libffi-dev \ + openssl-dev \ + postgresql-dev \ + mysql-client \ + build-base \ + linux-headers \ + git + ENV PIP_DEFAULT_TIMEOUT=100 \ PIP_DISABLE_PIP_VERSION_CHECK=1 \ PIP_NO_CACHE_DIR=1 \ @@ -17,20 +42,31 @@ ENV PIP_DEFAULT_TIMEOUT=100 \ RUN pip install "poetry==$POETRY_VERSION" RUN python -m venv /venv COPY pyproject.toml poetry.lock ./ -RUN poetry export -f requirements.txt --output requirements.txt --without-hashes && /venv/bin/python -m pip install --upgrade -r requirements.txt +RUN poetry export -f requirements.txt --output requirements.txt --without-hashes --only main && \ + /venv/bin/python -m pip install --upgrade -r requirements.txt && \ + pip uninstall -y poetry COPY keep keep +COPY ee keep/ee COPY examples examples -COPY README.md README.md -RUN poetry build && /venv/bin/pip install --use-deprecated=legacy-resolver dist/*.whl +COPY keep-ui/public/icons/unknown-icon.png unknown-icon.png +RUN /venv/bin/pip install --use-deprecated=legacy-resolver . 
&& \ + rm -rf /root/.cache/pip && \ + find /venv -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true && \ + find /venv -type f -name "*.pyc" -delete 2>/dev/null || true FROM base as final ENV PATH="/venv/bin:${PATH}" ENV VIRTUAL_ENV="/venv" +ENV EE_PATH="ee" COPY --from=builder /venv /venv COPY --from=builder /app/examples /examples +COPY --from=builder /app/unknown-icon.png unknown-icon.png # as per Openshift guidelines, https://docs.openshift.com/container-platform/4.11/openshift_images/create-images.html#use-uid_create-images -RUN chgrp -R 0 /app && chmod -R g=u /app -RUN chown -R keep:keep /app -RUN chown -R keep:keep /venv +RUN chgrp -R 0 /app && chmod -R g=u /app && \ + chown -R keep:keep /app && \ + chown -R keep:keep /venv USER keep -ENTRYPOINT ["gunicorn", "keep.api.api:get_app", "--bind" , "0.0.0.0:8080" , "--workers", "4" , "-k" , "uvicorn.workers.UvicornWorker", "-c", "/venv/lib/python3.11/site-packages/keep/api/config.py"] + +ENTRYPOINT ["/venv/lib/python3.13/site-packages/keep/entrypoint.sh"] + +CMD ["gunicorn", "keep.api.api:get_app", "--bind" , "0.0.0.0:8080" , "--workers", "4" , "-k" , "uvicorn.workers.UvicornWorker", "-c", "/venv/lib/python3.13/site-packages/keep/api/config.py", "--preload"] diff --git a/docker/Dockerfile.dev.api b/docker/Dockerfile.dev.api index 5b3f12b703..2409c4c862 100644 --- a/docker/Dockerfile.dev.api +++ b/docker/Dockerfile.dev.api @@ -17,10 +17,15 @@ RUN python -m venv /venv COPY pyproject.toml ./ RUN . 
/venv/bin/activate && poetry install --no-root +COPY keep keep +COPY ee keep/ee + # Setting the virtual environment path ENV PYTHONPATH="/app:${PYTHONPATH}" ENV PATH="/venv/bin:${PATH}" ENV VIRTUAL_ENV="/venv" +ENV POSTHOG_DISABLED="true" +ENTRYPOINT ["/app/keep/entrypoint.sh"] CMD ["gunicorn", "keep.api.api:get_app", "--bind" , "0.0.0.0:8080" , "--workers", "1" , "-k" , "uvicorn.workers.UvicornWorker", "-c", "./keep/api/config.py", "--reload"] diff --git a/docker/Dockerfile.dev.ui b/docker/Dockerfile.dev.ui index 0e963bb598..0fd7ae4c8b 100644 --- a/docker/Dockerfile.dev.ui +++ b/docker/Dockerfile.dev.ui @@ -19,7 +19,7 @@ COPY ./keep-ui/ /app RUN npm install # Install next globally and create a symlink RUN npm install -g next -RUN ln -s /usr/local/lib/node_modules/next/dist/bin/next /usr/local/bin/next +RUN ln -s /usr/local/lib/node_modules/next/dist/bin/next /usr/local/bin/next || echo "next binary already linked to bin" # Ensure port 3000 is accessible to our system EXPOSE 3000 diff --git a/docker/Dockerfile.ui b/docker/Dockerfile.ui index 549506c3ed..a385da4448 100644 --- a/docker/Dockerfile.ui +++ b/docker/Dockerfile.ui @@ -1,6 +1,4 @@ - - -FROM node:18-alpine AS base +FROM node:20-alpine AS base # Install dependencies only when needed FROM base AS deps @@ -26,19 +24,21 @@ ENV NEXT_TELEMETRY_DISABLED 1 # If using npm comment out above and use below instead ENV API_URL http://localhost:8080 -RUN npm run build +RUN NODE_OPTIONS=--max-old-space-size=8192 npm run build # Production image, copy all the files and run next FROM base AS runner ARG GIT_COMMIT_HASH=local ARG KEEP_VERSION=local +ARG KEEP_INCLUDE_SOURCES=false WORKDIR /app # Inject the git commit hash into the build # This is being injected from the build script ENV GIT_COMMIT_HASH=${GIT_COMMIT_HASH} ENV KEEP_VERSION=${KEEP_VERSION} +ENV KEEP_INCLUDE_SOURCES=${KEEP_INCLUDE_SOURCES} @@ -65,10 +65,11 @@ EXPOSE 3000 ENV PORT 3000 ENV POSTHOG_KEY=phc_muk9qE3TfZsX3SZ9XxX52kCGJBclrjhkP9JxAQcm1PZ -ENV 
POSTHOG_HOST=https://app.posthog.com +ENV POSTHOG_HOST=https://ingest.keephq.dev ENV PUSHER_HOST=localhost ENV PUSHER_PORT=6001 ENV PUSHER_APP_KEY=keepappkey +ENV NEXT_PUBLIC_SENTRY_DSN=https://0d4d59e3105ffe8afa27dcb95a222009@o4505515398922240.ingest.us.sentry.io/4508258058764288 ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/docs/alertevaluation/examples/victoriametricsmulti.mdx b/docs/alertevaluation/examples/victoriametricsmulti.mdx new file mode 100644 index 0000000000..edf8f3a4fc --- /dev/null +++ b/docs/alertevaluation/examples/victoriametricsmulti.mdx @@ -0,0 +1,67 @@ +--- +title: "VictoriaMetrics Multi Alert Example" +--- + +This example demonstrates a simple CPU usage multi-alert based on a metric: + +```yaml +workflow: + # Unique identifier for this workflow + id: query-victoriametrics-multi + # Display name shown in the UI + name: victoriametrics-multi-alert-example + # Brief description of what this workflow does + description: victoriametrics + triggers: + # This workflow can be triggered manually from the UI + - type: manual + steps: + # Query VictoriaMetrics for CPU metrics + - name: victoriametrics-step + provider: + # Use the VictoriaMetrics provider configuration + config: "{{ providers.vm }}" + type: victoriametrics + with: + # Query that returns the sum of CPU usage for each job + # Example response: + # [ + # {'metric': {'job': 'victoriametrics'}, 'value': [1737808021, '0.022633333333333307']}, + # {'metric': {'job': 'vmagent'}, 'value': [1737808021, '0.009299999999999998']} + # ] + query: sum(rate(process_cpu_seconds_total)) by (job) + queryType: query + + actions: + # Create an alert in Keep based on the query results + - name: create-alert + provider: + type: keep + with: + # Only create alert if CPU usage is above threshold + if: "{{ value.1 }} > 0.01 " + # Alert must persist for 1 minute + for: 1m + # Use job label to create unique fingerprint for each alert + fingerprint_fields: + - labels.job + alert: + # Alert name includes the 
specific job + name: "High CPU Usage on {{ metric.job }}" + description: "CPU usage is high on the VM (created from VM metric)" + # Set severity based on CPU usage thresholds: + # > 0.9 = critical + # > 0.7 = warning + # else = info + severity: '{{ value.1 }} > 0.9 ? "critical" : {{ value.1 }} > 0.7 ? "warning" : "info"' + labels: + # Job label is required for alert fingerprinting + job: "{{ metric.job }}" + # Additional context labels + environment: production + app: myapp + service: api + team: devops + owner: alice + +``` diff --git a/docs/alertevaluation/examples/victoriametricssingle.mdx b/docs/alertevaluation/examples/victoriametricssingle.mdx new file mode 100644 index 0000000000..c2cc9e0516 --- /dev/null +++ b/docs/alertevaluation/examples/victoriametricssingle.mdx @@ -0,0 +1,53 @@ +--- +title: "VictoriaMetrics Single Alert Example" +--- + +This example demonstrates a simple CPU usage alert based on a metric: + +```yaml +# This workflow queries VictoriaMetrics metrics and creates alerts based on CPU usage +workflow: + # Unique identifier for this workflow + id: query-victoriametrics + # Display name shown in the UI + name: victoriametrics-alert-example + # Brief description of what this workflow does + description: Monitors CPU usage metrics from VictoriaMetrics and creates alerts when thresholds are exceeded + + # Define how the workflow is triggered + triggers: + - type: manual # Can be triggered manually from the UI + + # Steps to execute in order + steps: + - name: victoriametrics-step + provider: + # Use VictoriaMetrics provider config defined in providers.vm + config: "{{ providers.vm }}" + type: victoriametrics + with: + # Query average CPU usage rate + query: avg(rate(process_cpu_seconds_total)) + queryType: query + + # Actions to take based on the query results + actions: + - name: create-alert + provider: + type: keep + with: + # Create alert if CPU usage exceeds threshold + if: "{{ value.1 }} > 0.0040" + alert: + name: "High CPU Usage" + 
description: "[Single] CPU usage is high on the VM (created from VM metric)" + # Set severity based on CPU usage thresholds + severity: '{{ value.1 }} > 0.9 ? "critical" : {{ value.1 }} > 0.7 ? "warning" : "info"' + # Alert labels for filtering and routing + labels: + environment: production + app: myapp + service: api + team: devops + owner: alice +``` diff --git a/docs/alertevaluation/overview.mdx b/docs/alertevaluation/overview.mdx new file mode 100644 index 0000000000..360c17bddb --- /dev/null +++ b/docs/alertevaluation/overview.mdx @@ -0,0 +1,52 @@ +--- +title: "Overview" +--- + +The Keep Alert Evaluation Engine is a flexible system that enables you to create alerts based on any data source and define evaluation rules. Unlike traditional monitoring solutions that are tied to specific metrics, Keep's engine allows you to combine data from multiple sources and apply complex logic to determine when and how alerts should be triggered. + +## Core Features + +### Generic Data Source Support +- Query any data source (databases, APIs, metrics systems) +- Combine multiple data sources in a single alert rule +- Apply custom transformations to the data + +### Flexible Alert Evaluation +- Define custom conditions using templated expressions +- Support for complex boolean logic and mathematical operations +- State management for alert transitions (pending->firing->resolved) +- Deduplication and alert instance tracking + +### Customizable Alert Definition +- Full control over alert metadata (name, description, severity) +- Dynamic labels based on evaluation context +- Template support for all alert fields +- Custom fingerprinting for alert grouping + +## Core Components + +### Alert States +- **Pending**: Initial state when alert condition is met (relevant only if `for` supplied) +- **Firing**: Active alert that has met its duration condition +- **Resolved**: Alert that is no longer active + +### Alert Rule Components +1. 
**Data Collection**: Query steps to gather data from any source +2. **Condition (`if`)**: Expression that determines when to create/update an alert +3. **Duration (`for`)**: Optional time period the condition must be true before firing +4. **Alert Definition**: Complete control over how the alert looks and behaves: + - Name and description + - Severity levels + - Labels for routing + - Custom fields and annotations + +### State Management +- **Fingerprinting**: Unique identifier for alert deduplication and state tracking +- **Keep-Firing**: Control how long alerts remain active +- **State Transitions**: Rules for how alerts move between states + +## Examples +The following examples demonstrate different ways to use the alert evaluation engine: + +- [Single Metric Alert](/alertevaluation/examples/victoriametricssingle) - Basic example showing metrics-based alerting +- [Multiple Metrics Alert](/alertevaluation/examples/victoriametricsmulti) - Advanced example with multiple alert instances diff --git a/docs/alerts/actionmenu.mdx b/docs/alerts/actionmenu.mdx new file mode 100644 index 0000000000..24f49f623d --- /dev/null +++ b/docs/alerts/actionmenu.mdx @@ -0,0 +1,38 @@ +--- +title: "Action Menu" +--- + +The Action Menu in Keep provides quick access to common actions that can be performed on alerts. This menu enables teams to efficiently manage and interact with alerts directly from the table. + + + + + +### (1) Run Workflow +Trigger predefined workflows directly from the Action Menu. This allows automation of actions such as escalating alerts or notifying specific teams. + +### (2) Create a New Workflow +Quickly create a new workflow tailored to the selected alert. This is useful for handling unique cases that require a custom response. + +### (3) View Alert History +Access the full history of the alert, including changes to its status, comments, and any actions performed. This provides a clear timeline of the alert's lifecycle. 
+ +### (4) Manually Enrich Alert +Add custom metadata or details to an alert manually. This can include additional context or information that assists with resolution. + +### (5) Self Assign +Assign the selected alert to yourself. This is ideal for team members who are taking ownership of specific alerts. + +### (6) View Alert +Open the alert details in the sidebar or dedicated alert view for a deeper dive into its metadata and context. + +### (7) Source-Specific Actions +Perform actions that are specific to the source of the alert. For example, linking directly to the monitoring tool or executing source-specific workflows. + +### (8) Dismiss Alert +Mark the alert as dismissed to indicate that no further action is required. This helps in managing and decluttering the alert table. + +### (9) Change Status +Update the status of the alert (e.g., from "firing" to "acknowledged"). This keeps the team informed about the current state of the alert. + +--- diff --git a/docs/alerts/overview.mdx b/docs/alerts/overview.mdx new file mode 100644 index 0000000000..9d4cee37f6 --- /dev/null +++ b/docs/alerts/overview.mdx @@ -0,0 +1,18 @@ +--- +title: "Overview" +--- + +**Alert Management** empowers teams to effectively manage, monitor, and act on critical alerts. + +With a robust and user-friendly interface, Keep allows users to gain deep insights into their alerts, filter through large volumes of data, and take swift actions to maintain system health. + + + + + +Everything related to Alert Management can be customized: + +1. **Alert table** - view and manage the alerts. +2. **Search Bar** - use CEL to filter alerts which can be saved as "Customized Presets". +3. **Facets** - slice and dice alerts. +4. **Columns and Time** - customize columns and theme for your preset. 
diff --git a/docs/overview/presets.mdx b/docs/alerts/presets.mdx similarity index 78% rename from docs/overview/presets.mdx rename to docs/alerts/presets.mdx index 3480ef040c..e159c335da 100644 --- a/docs/overview/presets.mdx +++ b/docs/alerts/presets.mdx @@ -1,13 +1,17 @@ --- -description: "CEL-Based Alert Filtering" -title: "Presets" +title: "Customized Presets" --- -With Keep's introduction of CEL (Common Expression Language) for alert filtering, users gain the flexibility to define more complex and precise alert filtering logic. This feature allows the creation of customizable filters using CEL expressions to refine alert visibility based on specific criteria. -## Introduction + -CEL-based filtering offers a powerful method for users to specify conditions under which alerts should be shown. Through a combination of logical, comparison, and string operations, alerts can be filtered to meet the exact needs of the user, improving the focus and efficiency of alert management. +You can think of a preset like a "Slack Channel" for your alerts - a logical container to follow only alerts that matter for you. + + + +With Keep's introduction of CEL (Common Expression Language) for alert filtering, users gain the flexibility to define more complex and precise alert filtering logic. + +This feature allows the creation of customizable filters using CEL expressions to refine alert visibility based on specific criteria. ## How It Works @@ -15,19 +19,22 @@ CEL-based filtering offers a powerful method for users to specify conditions und 2. **Preset Definition**: These expressions can be saved as presets for easy application to different alert streams. 3. **Alert Filtering**: When applied, the CEL expressions evaluate each alert against the defined criteria, filtering the alert stream in real-time. 
-## Practical Example -For instance, a user could create a CEL expression to filter alerts by severity and source, such as `severity == 'critical' && service.contains('database')`, ensuring only critical alerts from database services are displayed. - -## Core Concepts +## Creating a CEL Expression -- **CEL Expressions**: The CEL language syntax used to define alert filtering logic. -- **Presets**: Saved CEL expressions that can be reused across different alert streams. -- **Real-Time Filtering**: The dynamic application of CEL expressions to incoming alerts. +There are two ways of creating a CEL expression in Keep +### Manually creating CEL query -## Creating a CEL Expression +Use the [CEL Language Definition](https://github.com/google/cel-spec/blob/master/doc/langdef.md) documentation to better understand the capabilities of the Common Expression Language +This is an example of how to query all the alerts that came from `Sentry` + + + +If the CEL syntax you typed in is invalid, an error message will show up (in this case, we used invalid `''` instead of `""`): + + + -There is generally two ways of creating a CEL expression in Keep ### Importing from an SQL query 1. 
Click on the "Import from SQL" button @@ -43,18 +50,6 @@ Which in turn will generate and apply a valid CEL query: -### Manually creating CEL query - -Use the [CEL Language Definition](https://github.com/google/cel-spec/blob/master/doc/langdef.md) documentation to better understand the capabilities of the Common Expression Language -This is an example of how to query all the alerts that came from `Sentry` - - - -If the CEL syntax you typed in is invalid, an error message will show up (in this case, we used invalid `''` instead of `""`): - - - - ## Save Presets You can save your CEL queries into a `Preset` using the "Save current filter as a view" button @@ -70,6 +65,11 @@ The `Preset` will then be created and available for you to quickly navigate and +## Practical Example + +For instance, a user could create a CEL expression to filter alerts by severity and source, such as `severity == 'critical' && service.contains('database')`, ensuring only critical alerts from database services are displayed. + + ## Best Practices - **Specificity in Expressions**: Craft expressions that precisely target the desired alerts to avoid filtering out relevant alerts. diff --git a/docs/alerts/sidebar.mdx b/docs/alerts/sidebar.mdx new file mode 100644 index 0000000000..1bf2ed59df --- /dev/null +++ b/docs/alerts/sidebar.mdx @@ -0,0 +1,32 @@ +--- +title: "Alert Sidebar" +--- + +The Alert Sidebar in Keep provides a detailed view of a selected alert, offering in-depth context and information to aid in alert management and resolution. This feature is designed to give users a comprehensive understanding of the alert without leaving the main interface. + + + + + +### (1) Alert Name +Displays the name of the alert, which typically summarizes the issue or event being reported. This is the primary identifier for the alert. + +### (2) Alert Related Service +Shows the service associated with the alert. This helps teams quickly understand which part of the infrastructure or application is affected. 
+ +### (3) Alert Source +Indicates the source of the alert, such as the monitoring tool or system that generated it (e.g., Prometheus, Datadog). This provides context on where the alert originated. + +### (4) Alert Description +A detailed description of the alert, including specifics about the issue. This section helps provide a deeper understanding of what triggered the alert. + +### (5) Alert Fingerprint +A unique identifier for the alert. The fingerprint is used to correlate alerts and track their lifecycle across systems. + +### (6) Alert Timeline +Displays a chronological history of the alert, including when it was created, acknowledged, updated, or resolved. The timeline provides insights into how the alert has been managed. + +### (7) Alert Topology View +Offers a visual representation of the alert's impact on the system's topology. This view helps identify affected components and their relationships to other parts of the infrastructure. + +--- diff --git a/docs/alerts/sound.mdx b/docs/alerts/sound.mdx new file mode 100644 index 0000000000..ec5dae15af --- /dev/null +++ b/docs/alerts/sound.mdx @@ -0,0 +1,17 @@ +--- +title: "Sound Notifications" +--- + +Sound notifications ensure you never miss important updates or alerts. + +## How It Works +1. **Preset Notifications**: Mark a preset as "noisy," and any alert linked to it will play a sound. Alternatively, set individual alerts as `isNoisy=true` to trigger sounds through linked presets. +2. **Real-Time Alerts**: With WebSocket enabled, alerts arrive instantly. The server notifies the browser, which retrieves and processes new alerts immediately. + +## Who Hears Notifications? +Users with Keep open in their browser and the noisy preset visible in their navigation bar. Presets can be filtered to control notifications. + +### Customizing +1. **Change the Default Sound**: Replace the `alert.mp3` file with a custom audio file of your choice. 
+ +--- \ No newline at end of file diff --git a/docs/alerts/table.mdx b/docs/alerts/table.mdx new file mode 100644 index 0000000000..e27abc39e9 --- /dev/null +++ b/docs/alerts/table.mdx @@ -0,0 +1,69 @@ +--- +title: "Alert Table" +--- + +The Alert Table is the central interface for viewing and managing alerts in Keep. It provides a comprehensive view of all alerts with powerful filtering, sorting, and interaction capabilities. + + + + + +### (1) Columns +Columns in the alert table can be customized to display the most relevant data. Users can select which columns to display and reorder them using drag-and-drop functionality. + + + + + + +### (2) Alert Bulk Action +Easily select one or more alerts for bulk actions. Actions include options like "assign to incident," "dismiss," or other available workflows. + + + + + +### (3) Alert Actions Menu +The actions menu provides quick access to various operations for each alert, such as linking to incidents, creating tickets, or escalating. + + + + + +### (4) Alert Link +Each alert includes a badge that links directly to the original alert in the monitoring tool. Clicking this badge opens the alert in its source system for further investigation. + + + + + +### (5) Alert Ticket +You can assign a ticket to an alert. If an alert is associated with a ticket, a ticket badge will be displayed. Clicking on this badge navigates directly to the assigned ticket in the ticketing tool. + + + + + +### (6) Alert Comment +Users can add comments to any alert to provide additional context or share insights with team members. This improves collaboration and ensures all relevant information is available. + + + + + +### (7) Alert Related Workflows +View and trigger related workflows for an alert directly from the table. This allows seamless integration with predefined processes like escalation, suppression, or custom automation. + + + + + + +### (8) Sorting +The table supports sorting by any column using the "sort" icon. 
This makes it easy to prioritize or organize alerts based on specific criteria. + + + + + +--- diff --git a/docs/api-ref/actions/add-actions.mdx b/docs/api-ref/actions/add-actions.mdx deleted file mode 100644 index 0d49ef1f94..0000000000 --- a/docs/api-ref/actions/add-actions.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /actions ---- \ No newline at end of file diff --git a/docs/api-ref/actions/create-actions.mdx b/docs/api-ref/actions/create-actions.mdx deleted file mode 100644 index 0d49ef1f94..0000000000 --- a/docs/api-ref/actions/create-actions.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /actions ---- \ No newline at end of file diff --git a/docs/api-ref/actions/delete-action.mdx b/docs/api-ref/actions/delete-action.mdx deleted file mode 100644 index 1d4fdc257b..0000000000 --- a/docs/api-ref/actions/delete-action.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /actions/{action_id} ---- \ No newline at end of file diff --git a/docs/api-ref/actions/get-actions.mdx b/docs/api-ref/actions/get-actions.mdx deleted file mode 100644 index 138366466c..0000000000 --- a/docs/api-ref/actions/get-actions.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /actions ---- diff --git a/docs/api-ref/actions/put-action.mdx b/docs/api-ref/actions/put-action.mdx deleted file mode 100644 index 63996dc830..0000000000 --- a/docs/api-ref/actions/put-action.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: put /actions/{action_id} ---- \ No newline at end of file diff --git a/docs/api-ref/actions/update-action.mdx b/docs/api-ref/actions/update-action.mdx deleted file mode 100644 index 63996dc830..0000000000 --- a/docs/api-ref/actions/update-action.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: put /actions/{action_id} ---- \ No newline at end of file diff --git a/docs/api-ref/ai/create-alert.mdx b/docs/api-ref/ai/create-alert.mdx deleted file mode 100644 index f2c30f407b..0000000000 --- a/docs/api-ref/ai/create-alert.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- 
-openapi: post /ai/create-alert ---- diff --git a/docs/api-ref/ai/repair-alert.mdx b/docs/api-ref/ai/repair-alert.mdx deleted file mode 100644 index 6844700eba..0000000000 --- a/docs/api-ref/ai/repair-alert.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /ai/repair-alert ---- diff --git a/docs/api-ref/alerts/assign-alert.mdx b/docs/api-ref/alerts/assign-alert.mdx deleted file mode 100644 index 4195b278d1..0000000000 --- a/docs/api-ref/alerts/assign-alert.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /alerts/{fingerprint}/assign/{last_received} ---- \ No newline at end of file diff --git a/docs/api-ref/alerts/delete-alert.mdx b/docs/api-ref/alerts/delete-alert.mdx deleted file mode 100644 index eaa7465af0..0000000000 --- a/docs/api-ref/alerts/delete-alert.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /alerts ---- \ No newline at end of file diff --git a/docs/api-ref/alerts/enrich-alert.mdx b/docs/api-ref/alerts/enrich-alert.mdx deleted file mode 100644 index 6f700169eb..0000000000 --- a/docs/api-ref/alerts/enrich-alert.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /alerts/enrich ---- \ No newline at end of file diff --git a/docs/api-ref/alerts/get-alert-history.mdx b/docs/api-ref/alerts/get-alert-history.mdx deleted file mode 100644 index 6d5c177492..0000000000 --- a/docs/api-ref/alerts/get-alert-history.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /alerts/{fingerprint}/history ---- \ No newline at end of file diff --git a/docs/api-ref/alerts/get-alert.mdx b/docs/api-ref/alerts/get-alert.mdx deleted file mode 100644 index d293028b04..0000000000 --- a/docs/api-ref/alerts/get-alert.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /alerts/{fingerprint} ---- \ No newline at end of file diff --git a/docs/api-ref/alerts/get-alerts.mdx b/docs/api-ref/alerts/get-alerts.mdx deleted file mode 100644 index 17d142a241..0000000000 --- a/docs/api-ref/alerts/get-alerts.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /alerts ---- 
diff --git a/docs/api-ref/alerts/get-all-alerts.mdx b/docs/api-ref/alerts/get-all-alerts.mdx deleted file mode 100644 index b425ccc40b..0000000000 --- a/docs/api-ref/alerts/get-all-alerts.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /alerts ---- \ No newline at end of file diff --git a/docs/api-ref/alerts/receive-event.mdx b/docs/api-ref/alerts/receive-event.mdx deleted file mode 100644 index 4d18d92b53..0000000000 --- a/docs/api-ref/alerts/receive-event.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /alerts/event/{provider_type} ---- diff --git a/docs/api-ref/alerts/receive-generic-event.mdx b/docs/api-ref/alerts/receive-generic-event.mdx deleted file mode 100644 index ca8fbf0144..0000000000 --- a/docs/api-ref/alerts/receive-generic-event.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /alerts/event ---- \ No newline at end of file diff --git a/docs/api-ref/alerts/search-alerts.mdx b/docs/api-ref/alerts/search-alerts.mdx deleted file mode 100644 index 1b5f4f4ed0..0000000000 --- a/docs/api-ref/alerts/search-alerts.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /alerts/search ---- \ No newline at end of file diff --git a/docs/api-ref/alerts/webhook-challenge.mdx b/docs/api-ref/alerts/webhook-challenge.mdx deleted file mode 100644 index 2aa6c8bb1a..0000000000 --- a/docs/api-ref/alerts/webhook-challenge.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /alerts/event/netdata ---- \ No newline at end of file diff --git a/docs/api-ref/enrichment/create-extraction-rule.mdx b/docs/api-ref/enrichment/create-extraction-rule.mdx deleted file mode 100644 index 235f1589fd..0000000000 --- a/docs/api-ref/enrichment/create-extraction-rule.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /extraction ---- \ No newline at end of file diff --git a/docs/api-ref/enrichment/create-rule.mdx b/docs/api-ref/enrichment/create-rule.mdx deleted file mode 100644 index 6994f6aab7..0000000000 --- a/docs/api-ref/enrichment/create-rule.mdx +++ /dev/null @@ -1,3 
+0,0 @@ ---- -openapi: post /mapping ---- \ No newline at end of file diff --git a/docs/api-ref/enrichment/delete-extraction-rule.mdx b/docs/api-ref/enrichment/delete-extraction-rule.mdx deleted file mode 100644 index fc9571b0ed..0000000000 --- a/docs/api-ref/enrichment/delete-extraction-rule.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /extraction/{rule_id} ---- \ No newline at end of file diff --git a/docs/api-ref/enrichment/delete-rule.mdx b/docs/api-ref/enrichment/delete-rule.mdx deleted file mode 100644 index 4a7a1c3866..0000000000 --- a/docs/api-ref/enrichment/delete-rule.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /mapping/{rule_id} ---- \ No newline at end of file diff --git a/docs/api-ref/enrichment/get-extraction-rules.mdx b/docs/api-ref/enrichment/get-extraction-rules.mdx deleted file mode 100644 index 619c38eca8..0000000000 --- a/docs/api-ref/enrichment/get-extraction-rules.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /extraction ---- \ No newline at end of file diff --git a/docs/api-ref/enrichment/get-rules.mdx b/docs/api-ref/enrichment/get-rules.mdx deleted file mode 100644 index b1ac11c0b9..0000000000 --- a/docs/api-ref/enrichment/get-rules.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /mapping ---- \ No newline at end of file diff --git a/docs/api-ref/enrichment/update-extraction-rule.mdx b/docs/api-ref/enrichment/update-extraction-rule.mdx deleted file mode 100644 index 3b0dfcc7df..0000000000 --- a/docs/api-ref/enrichment/update-extraction-rule.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: put /extraction/{rule_id} ---- \ No newline at end of file diff --git a/docs/api-ref/enrichment/update-rule.mdx b/docs/api-ref/enrichment/update-rule.mdx deleted file mode 100644 index 842be2e45d..0000000000 --- a/docs/api-ref/enrichment/update-rule.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: put /mapping ---- \ No newline at end of file diff --git a/docs/api-ref/groups/get-groups.mdx 
b/docs/api-ref/groups/get-groups.mdx deleted file mode 100644 index c7d6e31136..0000000000 --- a/docs/api-ref/groups/get-groups.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /groups/ ---- \ No newline at end of file diff --git a/docs/api-ref/healthcheck/healthcheck.mdx b/docs/api-ref/healthcheck/healthcheck.mdx deleted file mode 100644 index c2e4577351..0000000000 --- a/docs/api-ref/healthcheck/healthcheck.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /healthcheck ---- diff --git a/docs/api-ref/mapping/create-mapping.mdx b/docs/api-ref/mapping/create-mapping.mdx deleted file mode 100644 index 6994f6aab7..0000000000 --- a/docs/api-ref/mapping/create-mapping.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /mapping ---- \ No newline at end of file diff --git a/docs/api-ref/mapping/delete-mapping-by-id.mdx b/docs/api-ref/mapping/delete-mapping-by-id.mdx deleted file mode 100644 index 52645c5dde..0000000000 --- a/docs/api-ref/mapping/delete-mapping-by-id.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /mapping/{mapping_id} ---- \ No newline at end of file diff --git a/docs/api-ref/mapping/get-mappings.mdx b/docs/api-ref/mapping/get-mappings.mdx deleted file mode 100644 index b1ac11c0b9..0000000000 --- a/docs/api-ref/mapping/get-mappings.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /mapping ---- \ No newline at end of file diff --git a/docs/api-ref/preset/create-preset.mdx b/docs/api-ref/preset/create-preset.mdx deleted file mode 100644 index 8925cb3231..0000000000 --- a/docs/api-ref/preset/create-preset.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /preset ---- \ No newline at end of file diff --git a/docs/api-ref/preset/delete-preset.mdx b/docs/api-ref/preset/delete-preset.mdx deleted file mode 100644 index 9e770eab09..0000000000 --- a/docs/api-ref/preset/delete-preset.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /preset/{uuid} ---- \ No newline at end of file diff --git a/docs/api-ref/preset/get-presets.mdx 
b/docs/api-ref/preset/get-presets.mdx deleted file mode 100644 index 33c3d5aeae..0000000000 --- a/docs/api-ref/preset/get-presets.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /preset ---- \ No newline at end of file diff --git a/docs/api-ref/preset/update-preset.mdx b/docs/api-ref/preset/update-preset.mdx deleted file mode 100644 index 669be7d4ca..0000000000 --- a/docs/api-ref/preset/update-preset.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: put /preset/{uuid} ---- \ No newline at end of file diff --git a/docs/api-ref/providers/add-alert.mdx b/docs/api-ref/providers/add-alert.mdx deleted file mode 100644 index cd6c35f17c..0000000000 --- a/docs/api-ref/providers/add-alert.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /providers/{provider_type}/{provider_id}/alerts ---- diff --git a/docs/api-ref/providers/delete-provider.mdx b/docs/api-ref/providers/delete-provider.mdx deleted file mode 100644 index 2e3188d912..0000000000 --- a/docs/api-ref/providers/delete-provider.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /providers/{provider_type}/{provider_id} ---- diff --git a/docs/api-ref/providers/export-providers.mdx b/docs/api-ref/providers/export-providers.mdx deleted file mode 100644 index 24c5b8040b..0000000000 --- a/docs/api-ref/providers/export-providers.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /providers/export ---- diff --git a/docs/api-ref/providers/get-alerts-configuration.mdx b/docs/api-ref/providers/get-alerts-configuration.mdx deleted file mode 100644 index 56570a63e4..0000000000 --- a/docs/api-ref/providers/get-alerts-configuration.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /providers/{provider_type}/{provider_id}/configured-alerts ---- diff --git a/docs/api-ref/providers/get-alerts-schema.mdx b/docs/api-ref/providers/get-alerts-schema.mdx deleted file mode 100644 index 49a5210298..0000000000 --- a/docs/api-ref/providers/get-alerts-schema.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get 
/providers/{provider_type}/schema ---- diff --git a/docs/api-ref/providers/get-installed-providers.mdx b/docs/api-ref/providers/get-installed-providers.mdx deleted file mode 100644 index 4de9cceb22..0000000000 --- a/docs/api-ref/providers/get-installed-providers.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /providers/export ---- \ No newline at end of file diff --git a/docs/api-ref/providers/get-logs.mdx b/docs/api-ref/providers/get-logs.mdx deleted file mode 100644 index a153513c21..0000000000 --- a/docs/api-ref/providers/get-logs.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /providers/{provider_type}/{provider_id}/logs ---- diff --git a/docs/api-ref/providers/get-providers.mdx b/docs/api-ref/providers/get-providers.mdx deleted file mode 100644 index c377cc2661..0000000000 --- a/docs/api-ref/providers/get-providers.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /providers ---- diff --git a/docs/api-ref/providers/get-webhook-settings.mdx b/docs/api-ref/providers/get-webhook-settings.mdx deleted file mode 100644 index 4771808d78..0000000000 --- a/docs/api-ref/providers/get-webhook-settings.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /providers/{provider_type}/webhook ---- diff --git a/docs/api-ref/providers/install-provider-oauth2.mdx b/docs/api-ref/providers/install-provider-oauth2.mdx deleted file mode 100644 index 3eb4a90fb8..0000000000 --- a/docs/api-ref/providers/install-provider-oauth2.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /providers/install/oauth2/{provider_type} ---- \ No newline at end of file diff --git a/docs/api-ref/providers/install-provider-webhook.mdx b/docs/api-ref/providers/install-provider-webhook.mdx deleted file mode 100644 index 251b3d8462..0000000000 --- a/docs/api-ref/providers/install-provider-webhook.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /providers/install/webhook/{provider_type}/{provider_id} ---- diff --git a/docs/api-ref/providers/install-provider.mdx 
b/docs/api-ref/providers/install-provider.mdx deleted file mode 100644 index e065001572..0000000000 --- a/docs/api-ref/providers/install-provider.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /providers/install ---- diff --git a/docs/api-ref/providers/invoke-provider-method.mdx b/docs/api-ref/providers/invoke-provider-method.mdx deleted file mode 100644 index e80c49497b..0000000000 --- a/docs/api-ref/providers/invoke-provider-method.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /providers/{provider_id}/invoke/{method} ---- \ No newline at end of file diff --git a/docs/api-ref/providers/test-provider.mdx b/docs/api-ref/providers/test-provider.mdx deleted file mode 100644 index 407b69828a..0000000000 --- a/docs/api-ref/providers/test-provider.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /providers/test ---- diff --git a/docs/api-ref/providers/update-provider.mdx b/docs/api-ref/providers/update-provider.mdx deleted file mode 100644 index 1ee02f7edc..0000000000 --- a/docs/api-ref/providers/update-provider.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: put /providers/{provider_id} ---- \ No newline at end of file diff --git a/docs/api-ref/providers/validate-provider-scopes.mdx b/docs/api-ref/providers/validate-provider-scopes.mdx deleted file mode 100644 index 64b6e58549..0000000000 --- a/docs/api-ref/providers/validate-provider-scopes.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /providers/{provider_id}/scopes ---- \ No newline at end of file diff --git a/docs/api-ref/pusher/pusher-authentication.mdx b/docs/api-ref/pusher/pusher-authentication.mdx deleted file mode 100644 index ed9c2b39b3..0000000000 --- a/docs/api-ref/pusher/pusher-authentication.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /pusher/auth ---- \ No newline at end of file diff --git a/docs/api-ref/rules/create-rule.mdx b/docs/api-ref/rules/create-rule.mdx deleted file mode 100644 index 11c79981ed..0000000000 --- a/docs/api-ref/rules/create-rule.mdx +++ 
/dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /rules ---- \ No newline at end of file diff --git a/docs/api-ref/rules/delete-rule.mdx b/docs/api-ref/rules/delete-rule.mdx deleted file mode 100644 index 66eb16654f..0000000000 --- a/docs/api-ref/rules/delete-rule.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /rules/{rule_id} ---- \ No newline at end of file diff --git a/docs/api-ref/rules/get-rules.mdx b/docs/api-ref/rules/get-rules.mdx deleted file mode 100644 index 44e0acce0a..0000000000 --- a/docs/api-ref/rules/get-rules.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /rules ---- \ No newline at end of file diff --git a/docs/api-ref/rules/update-rule.mdx b/docs/api-ref/rules/update-rule.mdx deleted file mode 100644 index 1e5125d5f6..0000000000 --- a/docs/api-ref/rules/update-rule.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: put /rules/{rule_id} ---- \ No newline at end of file diff --git a/docs/api-ref/settings/create-key.mdx b/docs/api-ref/settings/create-key.mdx deleted file mode 100644 index c6928f71eb..0000000000 --- a/docs/api-ref/settings/create-key.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /settings/apikey ---- \ No newline at end of file diff --git a/docs/api-ref/settings/create-user.mdx b/docs/api-ref/settings/create-user.mdx deleted file mode 100644 index aa2658e3ef..0000000000 --- a/docs/api-ref/settings/create-user.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /settings/users ---- \ No newline at end of file diff --git a/docs/api-ref/settings/delete-api-key.mdx b/docs/api-ref/settings/delete-api-key.mdx deleted file mode 100644 index ed21cb1bca..0000000000 --- a/docs/api-ref/settings/delete-api-key.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /settings/apikey/{keyId} ---- \ No newline at end of file diff --git a/docs/api-ref/settings/delete-smtp-settings.mdx b/docs/api-ref/settings/delete-smtp-settings.mdx deleted file mode 100644 index 4df0259bd5..0000000000 --- 
a/docs/api-ref/settings/delete-smtp-settings.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /settings/smtp ---- \ No newline at end of file diff --git a/docs/api-ref/settings/delete-user.mdx b/docs/api-ref/settings/delete-user.mdx deleted file mode 100644 index 807cb53570..0000000000 --- a/docs/api-ref/settings/delete-user.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /settings/users/{user_email} ---- \ No newline at end of file diff --git a/docs/api-ref/settings/get-keys.mdx b/docs/api-ref/settings/get-keys.mdx deleted file mode 100644 index 4c8ca4e816..0000000000 --- a/docs/api-ref/settings/get-keys.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /settings/apikeys ---- \ No newline at end of file diff --git a/docs/api-ref/settings/get-smtp-settings.mdx b/docs/api-ref/settings/get-smtp-settings.mdx deleted file mode 100644 index 0f701924a7..0000000000 --- a/docs/api-ref/settings/get-smtp-settings.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /settings/smtp ---- \ No newline at end of file diff --git a/docs/api-ref/settings/get-users.mdx b/docs/api-ref/settings/get-users.mdx deleted file mode 100644 index 8381a37ef4..0000000000 --- a/docs/api-ref/settings/get-users.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /settings/users ---- \ No newline at end of file diff --git a/docs/api-ref/settings/test-smtp-settings.mdx b/docs/api-ref/settings/test-smtp-settings.mdx deleted file mode 100644 index 64cc998d63..0000000000 --- a/docs/api-ref/settings/test-smtp-settings.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /settings/smtp/test ---- \ No newline at end of file diff --git a/docs/api-ref/settings/update-api-key.mdx b/docs/api-ref/settings/update-api-key.mdx deleted file mode 100644 index fbd6124685..0000000000 --- a/docs/api-ref/settings/update-api-key.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: put /settings/apikey ---- \ No newline at end of file diff --git a/docs/api-ref/settings/update-smtp-settings.mdx 
b/docs/api-ref/settings/update-smtp-settings.mdx deleted file mode 100644 index acf77b1fcc..0000000000 --- a/docs/api-ref/settings/update-smtp-settings.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /settings/smtp ---- \ No newline at end of file diff --git a/docs/api-ref/settings/webhook-settings.mdx b/docs/api-ref/settings/webhook-settings.mdx deleted file mode 100644 index 2274336eb0..0000000000 --- a/docs/api-ref/settings/webhook-settings.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /settings/webhook ---- diff --git a/docs/api-ref/status/status.mdx b/docs/api-ref/status/status.mdx deleted file mode 100644 index 84b74c746f..0000000000 --- a/docs/api-ref/status/status.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /status ---- \ No newline at end of file diff --git a/docs/api-ref/users/create-user.mdx b/docs/api-ref/users/create-user.mdx deleted file mode 100644 index 119d782569..0000000000 --- a/docs/api-ref/users/create-user.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /users ---- \ No newline at end of file diff --git a/docs/api-ref/users/delete-user-by-email.mdx b/docs/api-ref/users/delete-user-by-email.mdx deleted file mode 100644 index 41d49ec271..0000000000 --- a/docs/api-ref/users/delete-user-by-email.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /users/{user.email} ---- \ No newline at end of file diff --git a/docs/api-ref/users/delete-user.mdx b/docs/api-ref/users/delete-user.mdx deleted file mode 100644 index e9f6100429..0000000000 --- a/docs/api-ref/users/delete-user.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /users/{user_email} ---- \ No newline at end of file diff --git a/docs/api-ref/users/get-users.mdx b/docs/api-ref/users/get-users.mdx deleted file mode 100644 index a3776c02fa..0000000000 --- a/docs/api-ref/users/get-users.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /users ---- \ No newline at end of file diff --git a/docs/api-ref/whoami/get-tenant-id.mdx 
b/docs/api-ref/whoami/get-tenant-id.mdx deleted file mode 100644 index 947dc60485..0000000000 --- a/docs/api-ref/whoami/get-tenant-id.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /whoami ---- \ No newline at end of file diff --git a/docs/api-ref/workflows/create-workflow.mdx b/docs/api-ref/workflows/create-workflow.mdx deleted file mode 100644 index f6a47e6013..0000000000 --- a/docs/api-ref/workflows/create-workflow.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /workflows ---- diff --git a/docs/api-ref/workflows/delete-workflow-by-id.mdx b/docs/api-ref/workflows/delete-workflow-by-id.mdx deleted file mode 100644 index d59228725e..0000000000 --- a/docs/api-ref/workflows/delete-workflow-by-id.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: delete /workflows/{workflow_id} ---- diff --git a/docs/api-ref/workflows/export-workflows.mdx b/docs/api-ref/workflows/export-workflows.mdx deleted file mode 100644 index fb8dd59c3d..0000000000 --- a/docs/api-ref/workflows/export-workflows.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /workflows/export ---- diff --git a/docs/api-ref/workflows/get-raw-workflow-by-id.mdx b/docs/api-ref/workflows/get-raw-workflow-by-id.mdx deleted file mode 100644 index c7879fb425..0000000000 --- a/docs/api-ref/workflows/get-raw-workflow-by-id.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /workflows/{workflow_id}/raw ---- \ No newline at end of file diff --git a/docs/api-ref/workflows/get-workflow-by-id.mdx b/docs/api-ref/workflows/get-workflow-by-id.mdx deleted file mode 100644 index c61b2bc3e0..0000000000 --- a/docs/api-ref/workflows/get-workflow-by-id.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /workflows/{workflow_id} ---- diff --git a/docs/api-ref/workflows/get-workflow-execution-status.mdx b/docs/api-ref/workflows/get-workflow-execution-status.mdx deleted file mode 100644 index 146c0b2d6f..0000000000 --- a/docs/api-ref/workflows/get-workflow-execution-status.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: 
get /workflows/{workflow_id}/runs/{workflow_execution_id} ---- diff --git a/docs/api-ref/workflows/get-workflow-executions-by-alert-fingerprint.mdx b/docs/api-ref/workflows/get-workflow-executions-by-alert-fingerprint.mdx deleted file mode 100644 index 8f5abbcf3a..0000000000 --- a/docs/api-ref/workflows/get-workflow-executions-by-alert-fingerprint.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /workflows/executions ---- \ No newline at end of file diff --git a/docs/api-ref/workflows/get-workflow-executions.mdx b/docs/api-ref/workflows/get-workflow-executions.mdx deleted file mode 100644 index 654dfebdb3..0000000000 --- a/docs/api-ref/workflows/get-workflow-executions.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /workflows/executions/list ---- \ No newline at end of file diff --git a/docs/api-ref/workflows/get-workflows.mdx b/docs/api-ref/workflows/get-workflows.mdx deleted file mode 100644 index 5a87788227..0000000000 --- a/docs/api-ref/workflows/get-workflows.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: get /workflows ---- diff --git a/docs/api-ref/workflows/run-workflow-from-definition.mdx b/docs/api-ref/workflows/run-workflow-from-definition.mdx deleted file mode 100644 index f80b1b7921..0000000000 --- a/docs/api-ref/workflows/run-workflow-from-definition.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /workflows/test ---- \ No newline at end of file diff --git a/docs/api-ref/workflows/run-workflow.mdx b/docs/api-ref/workflows/run-workflow.mdx deleted file mode 100644 index 023879339f..0000000000 --- a/docs/api-ref/workflows/run-workflow.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: post /workflows/{workflow_id}/run ---- diff --git a/docs/api-ref/workflows/update-workflow-by-id.mdx b/docs/api-ref/workflows/update-workflow-by-id.mdx deleted file mode 100644 index 2a8ffae2c2..0000000000 --- a/docs/api-ref/workflows/update-workflow-by-id.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -openapi: put /workflows/{workflow_id} ---- \ No newline at end 
of file diff --git a/docs/authentication/okta.md b/docs/authentication/okta.md new file mode 100644 index 0000000000..6682213975 --- /dev/null +++ b/docs/authentication/okta.md @@ -0,0 +1,282 @@ +# Okta Integration Guide + +This document provides comprehensive information about the Okta integration in Keep, including configuration, deployment, maintenance, and testing. + +## Overview + +Keep supports Okta as an authentication provider, enabling: +- Single Sign-On (SSO) via Okta +- JWT token validation with JWKS +- User and group management through Okta +- Role-based access control +- Token refresh capabilities + +## Environment Variables + +### Backend Environment Variables + +| Variable | Description | Example | +|----------|-------------|---------| +| `AUTH_TYPE` | Set to `"okta"` to enable Okta authentication | `okta` | +| `OKTA_DOMAIN` | Your Okta domain | `company.okta.com` | +| `OKTA_API_TOKEN` | Admin API token for Okta management | `00aBcD3f4GhIJkl5m6NoPQr` | +| `OKTA_ISSUER` | The issuer URL for your Okta application | `https://company.okta.com/oauth2/default` | +| `OKTA_CLIENT_ID` | Client ID of your Okta application | `0oa1b2c3d4e5f6g7h8i9j` | +| `OKTA_CLIENT_SECRET` | Client Secret of your Okta application | `abcd1234efgh5678ijkl9012` | +| `OKTA_AUDIENCE` | (Optional) The audience for token validation | `api://keep` | + +### Frontend Environment Variables + +| Variable | Description | Example | +|----------|-------------|---------| +| `AUTH_TYPE` | Set to `"OKTA"` to enable Okta authentication | `OKTA` | +| `OKTA_CLIENT_ID` | Client ID of your Okta application | `0oa1b2c3d4e5f6g7h8i9j` | +| `OKTA_CLIENT_SECRET` | Client Secret of your Okta application | `abcd1234efgh5678ijkl9012` | +| `OKTA_ISSUER` | The issuer URL for your Okta application | `https://company.okta.com/oauth2/default` | +| `OKTA_DOMAIN` | Your Okta domain | `company.okta.com` | + +## Okta Configuration + +### Creating an Okta Application + +1. Sign in to your Okta Admin Console +2. 
Navigate to **Applications** > **Applications** +3. Click **Create App Integration** +4. Select **OIDC - OpenID Connect** as the Sign-in method +5. Choose **Web Application** as the Application type +6. Click **Next** + +### Application Settings + +1. **Name**: Enter a name for your application (e.g., "Keep") +2. **Grant type**: Select Authorization Code +3. **Sign-in redirect URIs**: Enter your app's callback URL, e.g., `https://your-keep-domain.com/api/auth/callback/okta` +4. **Sign-out redirect URIs**: Enter your app's sign-out URL, e.g., `https://your-keep-domain.com/signin` +5. **Assignments**: + - **Skip group assignment for now** or assign to appropriate groups +6. Click **Save** + +### Create API Token + +1. Navigate to **Security** > **API** +2. Select the **Tokens** tab +3. Click **Create Token** +4. Name your token (e.g., "Keep Integration") +5. Copy the generated token value (this will be your `OKTA_API_TOKEN`) + +### Configure OIDC Claims (Optional but Recommended) + +1. Navigate to your application +2. Go to the **Sign On** tab +3. Under **OpenID Connect ID Token**, click **Edit** +4. Add custom claims: + - `keep_tenant_id`: The tenant ID in Keep + - `keep_role`: The user's role in Keep + +## Deployment Instructions + +### Docker Deployment + +Add the required environment variables to your docker-compose file or Kubernetes deployment: + +```yaml +environment: + - AUTH_TYPE=okta + - OKTA_DOMAIN=your-company.okta.com + - OKTA_API_TOKEN=your-api-token + - OKTA_ISSUER=https://your-company.okta.com/oauth2/default + - OKTA_CLIENT_ID=your-client-id + - OKTA_CLIENT_SECRET=your-client-secret +``` + +### Next.js Frontend + +Configure environment variables in your `.env.local` file: + +``` +AUTH_TYPE=OKTA +OKTA_CLIENT_ID=your-client-id +OKTA_CLIENT_SECRET=your-client-secret +OKTA_ISSUER=https://your-company.okta.com/oauth2/default +OKTA_DOMAIN=your-company.okta.com +``` + +### Vercel Deployment + +Add the environment variables in your Vercel project settings. 
+ +## User and Group Management + +### Users + +The system automatically maps Okta users to Keep users. Key mappings: + +- Okta email → Keep email +- Okta firstName → Keep name +- Okta groups → Keep groups +- Custom claim `keep_role` → Keep role (defaults to "user" if not specified) + +### Groups + +Groups in Okta are synchronized with Keep. Groups with names starting with `keep_` are treated as roles. + +### Roles + +Roles are implemented as Okta groups with the prefix `keep_`. For example: +- `keep_admin` → Admin role in Keep +- `keep_user` → User role in Keep + +## Authentication Flow + +1. User accesses Keep application +2. User is redirected to Okta login page +3. After successful authentication, Okta returns an ID token and access token +4. Keep validates the token using Okta's JWKS endpoint +5. Keep extracts user information and permissions from the token +6. When tokens expire, Keep automatically refreshes them using the refresh token + +## Token Refresh + +The refresh token flow is handled automatically by the application: + +1. The system detects when an access token is about to expire +2. It uses the refresh token to obtain a new access token from Okta +3. The new token is stored and used for subsequent requests + +## Testing Strategies + +### Unit Tests + +1. **AuthVerifier Tests**: Test token validation with mock tokens + ```python + def test_okta_verify_bearer_token(): + # Create a mock token with the expected claims + # Initialize the OktaAuthVerifier + # Verify the token is validated correctly + ``` + +2. **IdentityManager Tests**: Test user and group management + ```python + def test_okta_create_user(): + # Mock Okta API responses + # Test creating a user + # Verify the correct API calls are made + ``` + +### Integration Tests + +1. **End-to-End Authentication Flow**: + - Create a test user in Okta + - Attempt to log in to the application + - Verify successful authentication + +2. 
**Token Refresh Test**: + - Obtain an access token and refresh token + - Wait for token expiration + - Verify token refresh occurs automatically + +3. **Role-Based Access Control**: + - Create users with different roles + - Verify access to different endpoints based on roles + +### Load Tests + +1. **Token Validation Performance**: + - Simulate multiple concurrent requests with tokens + - Measure response time and system load + - Verify JWKS caching is working correctly + +2. **User Management Scaling**: + - Test with a large number of users and groups + - Measure performance of group and user operations + +## Troubleshooting + +### Common Issues + +1. **Invalid Token Errors**: + - Check that `OKTA_ISSUER` matches the issuer in your Okta application + - Verify that token signing algorithm (RS256) is supported + - Check for clock skew between your server and Okta + +2. **API Request Failures**: + - Verify that `OKTA_API_TOKEN` is valid and has sufficient permissions + - Check rate limiting on Okta API + +3. **User Not Found**: + - Verify that the user exists in Okta + - Check user status (active/deactivated) + +### Debugging + +1. Enable debug logging: + ``` + AUTH_DEBUG=true + ``` + +2. 
Check Okta API logs in the Okta Admin Console + +## Maintenance Considerations + +### Token Rotation + +- Rotate the `OKTA_API_TOKEN` periodically for security +- Update the application with the new token without downtime + +### JWKS Caching + +- The implementation caches JWKS keys for 24 hours +- Adjust the cache duration if needed based on key rotation policy + +### Custom Claims + +- When adding new custom claims, update both Okta configuration and code + +### API Rate Limits + +- Be aware of Okta API rate limits +- Implement retry logic for rate limit errors + +## Code Structure + +### Backend Components + +- **`keep/identitymanager/identity_managers/okta/okta_authverifier.py`**: Handles JWT validation with JWKS +- **`keep/identitymanager/identity_managers/okta/okta_identitymanager.py`**: Manages users, groups, and roles via Okta API + +### Frontend Components + +- **`auth.config.ts`**: NextAuth.js configuration for Okta +- **`authenticationType.ts`**: Defines Okta as an authentication type + +## Security Considerations + +1. **Secure Storage of Secrets**: + - Store `OKTA_CLIENT_SECRET` and `OKTA_API_TOKEN` securely + - Never commit secrets to version control + +2. **Token Validation**: + - Always validate tokens with proper signature verification + - Verify token audience and issuer + +3. **Scoped API Tokens**: + - Use the principle of least privilege for API tokens + +## Future Improvements + +1. **Enhanced Group Mapping**: + - Implement more sophisticated group-to-role mappings + - Support nested groups in Okta + +2. **Custom Authorization Servers**: + - Support multiple Okta authorization servers + - Allow tenant-specific authorization servers + +3. 
**Custom Scope Handling**: + - Better integrate Okta scopes with Keep permissions + +## Support and Resources + +- [Okta Developer Documentation](https://developer.okta.com/docs/reference/) +- [NextAuth.js Okta Provider Documentation](https://next-auth.js.org/providers/okta) +- [JWT Debugging Tools](https://jwt.io/) \ No newline at end of file diff --git a/docs/cli/commands/extraction-create.mdx b/docs/cli/commands/extraction-create.mdx index 27ba3681e7..eeb31230be 100644 --- a/docs/cli/commands/extraction-create.mdx +++ b/docs/cli/commands/extraction-create.mdx @@ -31,7 +31,7 @@ Usage: keep extraction create [OPTIONS] * Default: `0` * Usage: `--priority ` - The priority of the extraction, higher priority means this rule will execute first. [0<=x<=100]. + The priority of the extraction, higher priority means this rule will execute first. `0<=x<=100`. * `pre` * Type: BOOL diff --git a/docs/cli/commands/mappings-create.mdx b/docs/cli/commands/mappings-create.mdx index 21069cc742..10cf7bd4e3 100644 --- a/docs/cli/commands/mappings-create.mdx +++ b/docs/cli/commands/mappings-create.mdx @@ -45,7 +45,7 @@ Usage: keep mappings create [OPTIONS] * Default: `0` * Usage: `--priority ` - The priority of the mapping, higher priority means this rule will execute first. [0<=x<=100]. + The priority of the mapping, higher priority means this rule will execute first. `0<=x<=100`. * `help`: * Type: BOOL diff --git a/docs/cli/installation.mdx b/docs/cli/installation.mdx index 3020dd4712..3ad5c26e7f 100644 --- a/docs/cli/installation.mdx +++ b/docs/cli/installation.mdx @@ -22,7 +22,10 @@ git clone https://github.com/keephq/keep.git && cd keep Install Keep CLI with `pip`: ```shell -pip install . +# MacOS if python or pip not present: +# brew install python@3.11 +# brew install postgresql +pip3.11 install . 
``` or with `poetry`: @@ -36,33 +39,33 @@ From now on, Keep should be installed locally and accessible from your CLI, test keep version ``` -### Test -Get a Slack Incoming Webhook using [this tutorial](https://api.slack.com/messaging/webhooks) and use use Keep to configure it +### Configuration +To get an API key, check Keep UI -> your username (bottom left) -> Settings -> API Keys ``` -keep config provider --provider-type slack --provider-id slack-demo +keep config new --url http://backend.my_keep.my_awesome_org.com:backend_port --api-key your_personal_api_key ``` -Paste the Slack Incoming Webhook URL (e.g. https://hooks.slack.com/services/...) and you're good to go 👌 -Let's now execute our example "Paper DB has insufficient disk space" alert +### Test -```bash -keep run --alerts-file examples/workflows/db_disk_space.yml +Now, apply an example workflow: +``` +keep workflow apply -f examples/workflows/query_clickhouse.yml ``` -Congrats 🥳 You should have received your first "Dunder Mifflin Paper Company" alert in Slack by now. +Congrats 🥳 Check your UI for the new workflow uploaded from the YAML file.
## Docker image (Option 2) ### Install ``` -docker run -v ${PWD}:/app -it us-central1-docker.pkg.dev/keephq/keep/keep-cli config provider --provider-type slack --provider-id slack-demo +docker run -v ${PWD}:/app -v ~/.keep.yaml:/root/.keep.yaml -it us-central1-docker.pkg.dev/keephq/keep/keep-cli keep config new --url http://backend.my_keep.my_awesome_org.com:backend_port --api-key your_personal_api_key ``` ### Test ``` -docker run -v ${PWD}:/app -it us-central1-docker.pkg.dev/keephq/keep/keep-cli -j run --alert-url https://raw.githubusercontent.com/keephq/keep/main/examples/alerts/db_disk_space.yml +docker run -v ${PWD}:/app -v ~/.keep.yaml:/root/.keep.yaml -it us-central1-docker.pkg.dev/keephq/keep/keep-cli workflow apply -f examples/workflows/query_clickhouse.yml ``` diff --git a/docs/cli/overview.mdx b/docs/cli/overview.mdx index f74599697b..b144aebadd 100644 --- a/docs/cli/overview.mdx +++ b/docs/cli/overview.mdx @@ -5,3 +5,9 @@ title: "Overview" Keep CLI allow you to manage Keep from CLI. Start by [installing](/cli/installation) Keep CLI and [running a workflow](/cli/commands/cli-run). + +### Env variables + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **KEEP_CLI_IGNORE_SSL** | Ignore SSL while connecting to the KEEP API | No | false | "true" or "false" | diff --git a/docs/deployment/authentication.mdx b/docs/deployment/authentication.mdx deleted file mode 100644 index a63a9d40d5..0000000000 --- a/docs/deployment/authentication.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: "Authentication" -sidebarTitle: Authentication ---- - -## Overview - -Authentication is crucial for securing your application. Different modes can be set up depending on the deployment type. Our system supports three primary authentication strategies. - -Understanding and configuring the appropriate authentication method is essential for protecting your resources and data. 
Choose the one that aligns with your deployment strategy and security requirements. - -## Multi-tenant authentication -To run Keep's managed platform, we use multi tenant authentication, meaning that different tenants can work separately on Keep's platform. As Keep is fully open source, you can also use it. - - - - -### When to use? -If you run Keep in multi tenant mode, join our slack at https://slack.keephq.dev, ping us and we will be more than happy to help you spin it up. -When you run Keep for more than one tenant with clear separation between tenants. - - -### Supported providers -For multi-tenant authentication, we currently use Auth0, but additional providers could be added (just submit a GitHub issue with the required provider). - -### Setup instructions -To spin up Keep with Auth0 as a provider, set up the following environment variables: -#### Backend -``` -AUTH_TYPE=MULTI_TENANT -AUTH0_MANAGEMENT_DOMAIN= -AUTH0_CLIENT_ID= -AUTH0_CLIENT_SECRET= -AUTH0_AUDIENCE= -``` -#### Frontend -``` -AUTH_TYPE=MULTI_TENANT -AUTH0_CLIENT_ID= -AUTH0_CLIENT_SECRET= -AUTH0_ISSUER= -``` - -## Single tenant authentication -Deploy self-hosted Keep with users management and authentication. - - - - -### When to use -When you self-deploy Keep but still need user management and authentication. - -### Supported providers -Username/Password, but additional providers could be added (just submit a GitHub issue with the required provider). 
- - -### Setup instructions -The easiest way would be to use [docker-compose-with-auth.yml](https://github.com/keephq/keep/blob/main/docker-compose-with-auth.yml) which populates the following environment variables: - -#### Backend -``` -- AUTH_TYPE=SINGLE_TENANT -- KEEP_JWT_SECRET=keepjwtsecret # should be replaced -- KEEP_DEFAULT_USERNAME=admin # should be replaced -- KEEP_DEFAULT_PASSWORD=keep # should be replaced -``` -#### Frontend -``` -- AUTH_TYPE=SINGLE_TENANT -``` - -## Single tenant - no authentication -Deploy self-hosted Keep without authentication. - -### When to use -When you want to try Keep as quick as possible. For example spinning it up on your localhost. For production usages, you should use authentication. - -### Supported providers -N/A - -### Setup instructions -The default [docker-compose.yml](https://github.com/keephq/keep/blob/main/docker-compose.yaml) file demonstrates how to start Keep without authentication. diff --git a/docs/deployment/authentication/auth0-auth.mdx b/docs/deployment/authentication/auth0-auth.mdx new file mode 100644 index 0000000000..954651165c --- /dev/null +++ b/docs/deployment/authentication/auth0-auth.mdx @@ -0,0 +1,48 @@ +--- +title: "Auth0 Authentication" +--- + + +Keep Cloud: ✅
+Keep Enterprise On-Premises: ✅
+Keep Open Source: ⛔️ +
+ +Keep supports multi-tenant environments through Auth0, enabling separate tenants to operate independently within the same Keep platform. + + + + + +### When to Use + +- **Already using Auth0:** If you are already using Auth0 in your organization, you can leverage it as Keep authentication provider. +- **SSO/SAML:** Auth0 supports various Single Sign-On (SSO) and SAML protocols, allowing you to integrate Keep with your existing identity management systems. + +### Setup Instructions + +To start Keep with Auth0 authentication, set the following environment variables: + +#### Frontend Environment Variables + +| Environment Variable | Description | Required | Default Value | +|--------------------|-----------|:--------:|:-------------:| +| AUTH_TYPE | Set to 'AUTH0' for Auth0 authentication | Yes | - | +| AUTH0_DOMAIN | Your Auth0 domain | Yes | - | +| AUTH0_CLIENT_ID | Your Auth0 client ID | Yes | - | +| AUTH0_CLIENT_SECRET | Your Auth0 client secret | Yes | - | +| AUTH0_ISSUER | Your Auth0 API issuer | Yes | - | + +#### Backend Environment Variables + +| Environment Variable | Description | Required | Default Value | +|--------------------|-----------|:--------:|:-------------:| +| AUTH_TYPE | Set to 'AUTH0' for Auth0 authentication | Yes | - | +| AUTH0_MANAGEMENT_DOMAIN | Your Auth0 management domain | Yes | - | +| AUTH0_CLIENT_ID | Your Auth0 client ID | Yes | - | +| AUTH0_CLIENT_SECRET | Your Auth0 client secret | Yes | - | +| AUTH0_AUDIENCE | Your Auth0 API audience | Yes | - | + +### Example configuration + +Use the `docker-compose-with-auth0.yml` for an easy setup, which includes necessary environment variables for enabling Auth0 authentication. diff --git a/docs/deployment/authentication/azuread-auth.mdx b/docs/deployment/authentication/azuread-auth.mdx new file mode 100644 index 0000000000..e0a8829488 --- /dev/null +++ b/docs/deployment/authentication/azuread-auth.mdx @@ -0,0 +1,202 @@ +--- +title: "Azure AD Authentication" +--- + + +Keep Cloud: ✅
+Keep Enterprise On-Premises: ✅
+Keep Open Source: ⛔️ +
+ +Keep supports enterprise authentication through Azure Entre ID (formerly known as Azure AD), enabling organizations to use their existing Microsoft identity platform for secure access management. + +## When to Use + +- **Microsoft Environment:** If your organization uses Microsoft 365 or Azure services, Azure AD integration provides seamless authentication. +- **Enterprise SSO:** Leverage Azure AD's Single Sign-On capabilities for unified access management. + +## Setup Instructions (on Azure AD) + +### Creating an Azure AD Application + +1. Sign in to the [Azure Portal](https://portal.azure.com) +2. Navigate to **Microsoft Entra ID** > **App registrations** > **New registration** + + + Azure AD App Registration + + +3. Configure the application: + - Name: "Keep" + +Note that we are using "Register an application to integrate with Microsoft Entra ID (App you're developing)" since you're self-hosting Keep and need direct control over the authentication flow and permissions for your specific instance - unlike the cloud/managed version where Keep's team has already configured a centralized application registration. + + + Azure AD App Registration + + +4. Configure the application (continue) +- Supported account types: "Single tenant" + + +We recommend using "Single tenant" for enhanced security as it restricts access to users within your organization only. While multi-tenant configuration is possible, it would allow users from any Azure AD directory to access your Keep instance, which could pose security risks unless you have specific cross-organization requirements. + + + - Redirect URI: "Web" + your redirect URI + + +We use "Web" platform instead of "Single Page Application (SPA)" because Keep's backend handles the authentication flow using client credentials/secrets, which is more secure than the implicit flow used in SPAs. 
This prevents exposure of tokens in the browser and provides stronger security through server-side token validation and refresh token handling. + + + +For localhost, the redirect would be http://localhost:3000/api/auth/callback/microsoft-entra-id + +For production, it should be something like http://your_keep_frontend_domain/api/auth/callback/microsoft-entra-id + + + + + Azure AD App Registration + + +5. Finally, click "register" + +### Configure Authentication +After we created the application, let's configure the authentication. + +1. Go to "App Registrations" -> "All applications" + + + Azure AD Authentication Configuration + + + +2. Click on your application -> "Add a certificate or secret" + + + Azure AD Authentication Configuration + + + +3. Click on "New client secret" and give it a name + + + Azure AD Authentication Configuration + + +4. Keep the "Value", we will use it soon as `KEEP_AZUREAD_CLIENT_SECRET` + + + Azure AD Authentication Configuration + + +### Configure Groups + +Keep maps Azure AD groups to roles with two default groups: +1. Admin Group (read + write) +2. NOC Group (read only) + +To create those groups, go to Groups -> All groups and create two groups: + + + Azure AD Authentication Configuration + + +Keep the Object id of these groups and use it as `KEEP_AZUREAD_ADMIN_GROUP_ID` and `KEEP_AZUREAD_NOC_GROUP_ID`. + +### Configure Group Claims + +1. Navigate to **Token configuration** + + + Azure AD Authentication Configuration + + + +2. Add groups claim: + - Select "Security groups" and "Groups assigned to the application" + - Choose "Group ID" as the claim value + + + Azure AD Authentication Configuration + + + + + Azure AD Authentication Configuration + + +### Configure Application Scopes + +1. Go to "Expose an API" and click on "Add a scope" + + + Azure AD Authentication Configuration + + +2. Keep the default Application ID and click "Save and continue" + + + Azure AD Authentication Configuration + + +3. 
Add "default" as scope name, also give a display name and description + + + Azure AD Authentication Configuration + + +3. Finally, click "Add scope" + + + Azure AD Authentication Configuration + + +## Setup Instructions (on Keep) + +After you configured Azure AD you should have the following: +1. Azure AD Tenant ID +2. Azure AD Client ID + +How to get: + + + Azure AD Authentication Configuration + + +3. Azure AD Client Secret [See Configure Authentication](#configure-authentication). +4. Azure AD Group ID's for Admins and NOC (read only) [See Configure Groups](#configure-groups). + + +### Configuration + +#### Frontend + +| Environment Variable | Description | Required | Default Value | +|--------------------|-------------|:---------:|:-------------:| +| AUTH_TYPE | Set to 'AZUREAD' for Azure AD authentication | Yes | - | +| KEEP_AZUREAD_CLIENT_ID | Your Azure AD application (client) ID | Yes | - | +| KEEP_AZUREAD_CLIENT_SECRET | Your client secret | Yes | - | +| KEEP_AZUREAD_TENANT_ID | Your Azure AD tenant ID | Yes | - | +| NEXTAUTH_URL | Your Keep application URL | Yes | - | +| NEXTAUTH_SECRET | Random string for NextAuth.js | Yes | - | + +#### Backend + +| Environment Variable | Description | Required | Default Value | +|--------------------|-------------|:---------:|:-------------:| +| AUTH_TYPE | Set to 'AZUREAD' for Azure AD authentication | Yes | - | +| KEEP_AZUREAD_TENANT_ID | Your Azure AD tenant ID | Yes | - | +| KEEP_AZUREAD_CLIENT_ID | Your Azure AD application (client) ID | Yes | - | +| KEEP_AZUREAD_ADMIN_GROUP_ID | The group ID of Keep Admins (read write) | Yes | - | +| KEEP_AZUREAD_NOC_GROUP_ID | The group ID of Keep NOC (read only) | Yes | - | + +## Features and Limitations + +#### Supported Features +- Single Sign-On (SSO) +- Role-based access control through Azure AD groups +- Multi-factor authentication (when configured in Azure AD) + +#### Limitations +See [Overview](/deployment/authentication/overview) diff --git 
a/docs/deployment/authentication/db-auth.mdx b/docs/deployment/authentication/db-auth.mdx new file mode 100644 index 0000000000..4e4b0dd034 --- /dev/null +++ b/docs/deployment/authentication/db-auth.mdx @@ -0,0 +1,33 @@ +--- +title: "DB Authentication" +--- + +For applications requiring user management and authentication, Keep supports basic authentication with username and password. + + + + + + +### When to Use + +- **Self-Hosted Deployments:** When you're deploying Keep for individual use or within an organization. +- **Enhanced Security:** Provides a simple yet effective layer of security for your Keep instance. + +### Setup Instructions + +To start Keep with DB authentication, set the following environment variables: + +| Environment Variable | Description | Required | Frontend/Backend | Default Value | +|--------------------|:-----------:|:--------:|:----------------:|:-------------:| +| AUTH_TYPE | Set to 'DB' for database authentication | Yes | Both | - | +| KEEP_JWT_SECRET | Secret for JWT token generation | Yes | Backend | - | +| KEEP_DEFAULT_USERNAME | Default admin username | No | Backend | keep | +| KEEP_DEFAULT_PASSWORD | Default admin password | No | Backend | keep | +| KEEP_FORCE_RESET_DEFAULT_PASSWORD | Override the current admin password | No | Backend | false | + +### Example configuration + +Use the `docker-compose-with-auth.yml` for an easy setup, which includes necessary environment variables for enabling basic authentication. diff --git a/docs/deployment/authentication/keycloak-auth.mdx b/docs/deployment/authentication/keycloak-auth.mdx new file mode 100644 index 0000000000..b8ccd3605e --- /dev/null +++ b/docs/deployment/authentication/keycloak-auth.mdx @@ -0,0 +1,59 @@ +--- +title: "Keycloak Authentication" +--- + + +Keep Cloud: ✅
+Keep Enterprise On-Premises: ✅
+Keep Open Source: ⛔️ +
+ +Keep supports Keycloak in a "managed" way where Keep auto-provisions all resources (realm, client, etc.). Keep can also work with externally managed Keycloak. To learn how, please contact the team on [Slack](https://slack.keephq.dev). + +Keep integrates with Keycloak to provide a powerful and flexible authentication system for multi-tenant applications, supporting Single Sign-On (SSO) and SAML. + + + + + +### When to Use + +- **On Prem:** When deploying Keep on-premises and requiring a robust authentication system. +- **OSS:** If you prefer using open-source software for your authentication needs. +- **Enterprise Protocols:** When you need support for enterprise-level protocols like SAML and OpenID Connect. +- **Fully Customized:** When you need a highly customizable authentication solution. +- **RBAC:** When you require Role-Based Access Control for managing user permissions. +- **User and Group Management:** When you need advanced user and group management capabilities. + +### Setup Instructions + +To start Keep with Keycloak authentication, set the following environment variables: + +#### Frontend Environment Variables + +| Environment Variable | Description | Required | Default Value | +|--------------------|-----------|:--------:|:-------------:| +| AUTH_TYPE | Set to 'KEYCLOAK' for Keycloak authentication | Yes | - | +| KEYCLOAK_ID | Your Keycloak client ID (e.g. keep) | Yes | - | +| KEYCLOAK_ISSUER | Full URL to Your Keycloak issuer URL e.g. 
http://localhost:8181/auth/realms/keep | Yes | - | +| KEYCLOAK_SECRET | Your Keycloak client secret | Yes | keep-keycloak-secret | + +#### Backend Environment Variables + +| Environment Variable | Description | Required | Default Value | +|--------------------|-----------|:--------:|:-------------:| +| AUTH_TYPE | Set to 'KEYCLOAK' for Keycloak authentication | Yes | - | +| KEYCLOAK_URL | Full URL to your Keycloak server | Yes | http://localhost:8181/auth/ | +| KEYCLOAK_REALM | Your Keycloak realm | Yes | keep | +| KEYCLOAK_CLIENT_ID | Your Keycloak client ID | Yes | keep | +| KEYCLOAK_CLIENT_SECRET | Your Keycloak client secret | Yes | keep-keycloak-secret | +| KEYCLOAK_ADMIN_USER | Admin username for Keycloak | Yes | keep_admin | +| KEYCLOAK_ADMIN_PASSWORD | Admin password for Keycloak | Yes | keep_admin | +| KEYCLOAK_AUDIENCE | Audience for Keycloak | Yes | realm-management | + + +### Example configuration + +To get a better understanding on how to use Keep together with Keycloak, you can: +- See [Keycloak](https://github.com/keephq/keep/tree/main/keycloak) directory for configuration, realm.json, etc +- See Keep + Keycloak [docker-compose example](https://github.com/keephq/keep/blob/main/keycloak/docker-compose.yaml) diff --git a/docs/deployment/authentication/no-auth.mdx b/docs/deployment/authentication/no-auth.mdx new file mode 100644 index 0000000000..cde56e73ef --- /dev/null +++ b/docs/deployment/authentication/no-auth.mdx @@ -0,0 +1,23 @@ +--- +title: "No Authentication" +--- +Using this configuration in production is not secure and strongly discouraged. + + +Deploying Keep without authentication is the quickest way to get up and running, ideal for local development or internal tools where security is not a concern. 
+## Setup Instructions +Either if you use docker-compose, kubernetes, openshift or any other deployment method, add the following environment variable: +``` +# Frontend +AUTH_TYPE=NOAUTH + +# Backend +AUTH_TYPE=NOAUTH +``` +## Implications +With `AUTH_TYPE=NOAUTH`: +- Keep won't show any login page and will let you consume APIs without authentication. +- Keep will use a JWT with "keep" as the tenant id, but will not validate it. +- Any API key provided in the `x-api-key` header will be accepted without validation. + +This configuration essentially bypasses all authentication checks, making it unsuitable for production environments where security is a concern. diff --git a/docs/deployment/authentication/oauth2-proxy-gitlab.mdx b/docs/deployment/authentication/oauth2-proxy-gitlab.mdx new file mode 100644 index 0000000000..fd35f2f573 --- /dev/null +++ b/docs/deployment/authentication/oauth2-proxy-gitlab.mdx @@ -0,0 +1,241 @@ +--- +title: "Example: OAuth2‑Proxy + Keep + GitLab SSO" +--- + +A **step‑by‑step cookbook** for adding single‑sign‑on to [Keep](https://github.com/keephq) with your **self‑hosted GitLab** using [oauth2‑proxy](https://oauth2‑proxy.github.io/) and the NGINX Ingress Controller. + +> **Conventions used below** +> +> * ``             – public FQDN where users access Keep (e.g. `keep.example.com`) +> * ``           – URL of your GitLab instance (e.g. `gitlab.example.com`) +> * ``         – container registry that stores images (omit if you use the public images) +> * Kubernetes namespace **`keep`** – feel free to change it everywhere if you prefer another namespace. + +--- + +## 1. 
Prerequisites + +| What | Why | +| ------------------------------------------- | ----------------------------------------------------- | +| Kubernetes cluster & `keep` namespace | Where Keep, oauth2‑proxy and Services live | +| **ingress‑nginx** (or compatible) | Provides the `auth_request` feature oauth2‑proxy uses | +| GitLab 15 + at `https://` | OpenID‑Connect issuer | +| Helm 3.x & offline charts/images (optional) | If your cluster has no Internet egress | + +--- + +## 2. Create the GitLab OAuth application + +1. **GitLab ▸ Admin → Applications → New** +2. Name → `keep‑sso` +3. Redirect URI → `https:///oauth2/callback` +4. Scopes → `openid profile email` (+ `read_api` if you plan to gate access by group/project) +5. Save – copy the generated **Application ID** and **Secret**. + +--- + +## 3. Kubernetes secrets & config + +```bash +# 3.1 Generate a 32‑byte cookie secret +echo "$(openssl rand -base64 32 | head -c 32 | base64)" > cookie.b64 + +# 3.2 Store GitLab credentials and cookie secret +kubectl -n keep create secret generic oauth2-proxy \ + --from-literal=client-id= \ + --from-literal=client-secret= \ + --from-file=cookie-secret=cookie.b64 + +# 3.3 Add gitlab credentials and cookie secret using OAUTH2_PROXY ENV variables +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET=cookie.b64 + +# (optional) store GitLab’s custom CA certificate +kubectl -n keep create secret generic gitlab-ca \ + --from-file=gitlab-ca.pem +``` + +```yaml +# 3.4 oauth2_proxy.cfg (ConfigMap) +apiVersion: v1 +kind: ConfigMap +metadata: + name: oauth2-proxy + namespace: keep +data: + oauth2_proxy.cfg: | + email_domains = ["*"] + upstreams = ["file:///dev/null"] # we only use auth‑request mode + provider = "gitlab" + cookie_name = "keep-dev" #if empty, will use default cookie name: _oauth2_proxy + cookie_secure = true +``` + +--- + +## 4. 
Deploy **oauth2‑proxy** (Helm) + +```yaml +# values.oauth2-proxy.yaml – minimal baseline +image: # replace with public image if desired + repository: /oauth2-proxy/oauth2-proxy + tag: v7.9.0 + +config: + configFile: |- + # content comes from the ConfigMap above + +extraArgs: + oidc-issuer-url: https:// + set-xauthrequest: "true" # add X-Auth-Request-*/X-Forwarded-* headers + pass-authorization-header: "true" # add Authorization: Bearer + # provider-ca-file: /ca/gitlab-ca.pem # enable if you mounted a corporate CA or use ssl-insecure-skip-verify: "true" to disable SSL check. +extraVolumes: + - name: gitlab-ca + secret: + secretName: gitlab-ca +extraVolumeMounts: + - name: gitlab-ca + mountPath: /ca/gitlab-ca.pem + subPath: gitlab-ca.pem + readOnly: true + +service: + type: ClusterIP + +ingress: + enabled: false # we only need an internal Service +``` + +```bash +helm repo add oauth2-proxy https://oauth2-proxy.github.io/manifests +helm upgrade --install oauth2-proxy oauth2-proxy/oauth2-proxy \ + -n keep -f values.oauth2-proxy.yaml +``` + +*Lab‑only shortcut*: instead of mounting the CA you can temporarily add +`ssl-insecure-skip-verify: "true"` under `extraArgs`. + +--- + +## 5. Patch (or create) Keep’s Ingress resource + +Add **three** annotations so ingress‑nginx delegates auth to the Service: + +```yaml +global: + ingress: + annotations: + nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.keep.svc.cluster.local/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https:///oauth2/start?rd=$request_uri" + nginx.ingress.kubernetes.io/auth-response-headers: "authorization,x-auth-request-user,x-auth-request-email,x-forwarded-user,x-forwarded-email,x-forwarded-groups" +``` + +Redeploy Keep (or patch the Ingress manually). + +--- + +## 6. 
Environment variables for Keep + +```yaml +backend: + env: + - name: AUTH_TYPE + value: OAUTH2PROXY + - name: KEEP_OAUTH2_PROXY_USER_HEADER + value: x-auth-request-email + - name: KEEP_OAUTH2_PROXY_ROLE_HEADER + value: x-auth-request-groups + - name: KEEP_OAUTH2_PROXY_AUTO_CREATE_USER + value: true + - name: KEEP_OAUTH2_PROXY_ADMIN_ROLES + value: + - name: KEEP_OAUTH2_PROXY_NOC_ROLES + value: + +frontend: + env: + # Public URL the **browser** should use + - name: NEXTAUTH_URL + value: "https://" + + # URL the **server‑side** Next.js code can always reach + - name: NEXTAUTH_URL_INTERNAL + value: "http://keep-frontend.keep.svc.cluster.local:3000" + + # API URLs + - name: API_URL_CLIENT # browser → ingress + value: "/v2" + - name: API_URL # server → backend Service (no auth‑proxy) + value: "http://keep-backend.keep.svc.cluster.local:8080" + + #Oauth2-Proxy + - name: AUTH_TYPE + value: OAUTH2PROXY + - name: KEEP_OAUTH2_PROXY_USER_HEADER + value: x-auth-request-email + - name: KEEP_OAUTH2_PROXY_ROLE_HEADER + value: x-auth-request-groups +``` + +Roll out the frontend: + +```bash +kubectl -n keep rollout restart deploy/keep-frontend +``` + +--- + +## 7. Quick validation + +```bash +# 7.1 Call auth endpoint without cookie – expect 401 +curl -I http://oauth2-proxy.keep.svc.cluster.local/oauth2/auth + +# 7.2 Copy the keep-dev cookie from your browser session +curl -I --cookie "keep-dev=" \ + http://oauth2-proxy.keep.svc.cluster.local/oauth2/auth # expect 200 +``` + +Browser smoke‑test: + +* `https://` → redirect to GitLab → sign in → return to Keep. +* DevTools ▸ Network → `/api/auth/session` returns **200**. + +--- + +## 8. 
Troubleshooting + +| Symptom | Common cause & remedy | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | +| **TLS error** `x509: certificate signed by unknown authority` | Mount your GitLab CA (`provider-ca-file`) or set `ssl-insecure-skip-verify=true` (dev only). | +| Ingress logs `auth request unexpected status: 502` | `auth-url` is pointing at the external host – use the internal Service DNS (`http://oauth2-proxy.keep.svc.cluster.local`). | +| Browser loops at `/signin?callbackUrl=…` | ① `set-xauthrequest` not enabled, or ② `auth-response-headers` not set, or ③ backend receives calls through oauth2‑proxy (`API_URL` wrong). | +| Redirect to `0.0.0.0:3000` or pod name | `NEXTAUTH_URL` missing at **build time**; rebuild UI or override env. | +| 401 from `/oauth2/auth` even with cookie | Cookie expired / clocks out of sync. Clear cookie and re‑login. | + +--- + +## 9. Clean‑up + +```bash +helm -n keep uninstall oauth2-proxy +helm -n keep uninstall keep # if you want to remove Keep +kubectl -n keep delete secret oauth2-proxy gitlab-ca +``` + +--- + +## Appendix A – Generate a 32‑byte cookie secret + +```bash +openssl rand -hex 16 | xxd -r -p | base64 +``` + +## Appendix B – Sync images to an offline registry (example) + +```bash +skopeo copy docker://quay.io/oauth2-proxy/oauth2-proxy:v7.9.0 \ + docker:///oauth2-proxy/oauth2-proxy:v7.9.0 +``` diff --git a/docs/deployment/authentication/oauth2proxy-auth.mdx b/docs/deployment/authentication/oauth2proxy-auth.mdx new file mode 100644 index 0000000000..5969988c66 --- /dev/null +++ b/docs/deployment/authentication/oauth2proxy-auth.mdx @@ -0,0 +1,39 @@ +--- +title: "OAuth2Proxy Authentication" +--- + + +Keep Cloud: ✅
+Keep Enterprise On-Premises: ✅
+Keep Open Source: (experimental) +
+ +Delegate authentication to Oauth2Proxy. + +### When to Use + +- **oauth2-proxy user:** Use this authentication method if you want to delegate authentication to an external Oauth2Proxy service. + +### Setup Instructions + +To start Keep with Oauth2Proxy authentication, set the following environment variables: + +#### Frontend Environment Variables + +| Environment Variable | Description | Required | Default Value | +|--------------------|-----------|:--------:|:-------------:| +| AUTH_TYPE | Set to 'OAUTH2PROXY' for OAUTH2PROXY authentication | Yes | - | +| KEEP_OAUTH2_PROXY_USER_HEADER | Header for the authenticated user's email | Yes | x-forwarded-email | +| KEEP_OAUTH2_PROXY_ROLE_HEADER | Header for the authenticated user's role | Yes | x-forwarded-groups | + +#### Backend Environment Variables + +| Environment Variable | Description | Required | Default Value | +|--------------------|-----------|:--------:|:-------------:| +| AUTH_TYPE | Set to 'OAUTH2PROXY' for OAUTH2PROXY authentication | Yes | - | +| KEEP_OAUTH2_PROXY_USER_HEADER | Header for the authenticated user's email | Yes | x-forwarded-email | +| KEEP_OAUTH2_PROXY_ROLE_HEADER | Header for the authenticated user's role | Yes | x-forwarded-groups | +| KEEP_OAUTH2_PROXY_AUTO_CREATE_USER | Automatically create user if not exists | No | true | +| KEEP_OAUTH2_PROXY_ADMIN_ROLES | Role names for admin users | No | admin | +| KEEP_OAUTH2_PROXY_NOC_ROLES | Role names for NOC (Network Operations Center) users | No | noc | +| KEEP_OAUTH2_PROXY_WEBHOOK_ROLES | Role names for webhook users | No | webhook | diff --git a/docs/deployment/authentication/okta-auth.mdx b/docs/deployment/authentication/okta-auth.mdx new file mode 100644 index 0000000000..5b05d0cf47 --- /dev/null +++ b/docs/deployment/authentication/okta-auth.mdx @@ -0,0 +1,73 @@ +--- +title: "Okta Authentication" +--- + +This document provides comprehensive information about the Okta integration in Keep. 
+ +## Overview + +Keep supports Okta as an authentication provider, enabling: +- Single Sign-On (SSO) via Okta +- OAuth2/OIDC authentication flow +- JWT token verification with JWKS +- Role-based access control through token claims + +## Environment Variables + +### Backend Environment Variables + +| Variable | Description | Required | +|----------|-------------|----------| +| `AUTH_TYPE` | Set to `"OKTA"` to enable Okta authentication | Yes | +| `OKTA_DOMAIN` | Your Okta domain (e.g., `https://company.okta.com`) | Yes | +| `OKTA_ISSUER` | The issuer URL for your Okta authorization server (e.g., `https://company.okta.com/oauth2/default`) | Yes | +| `OKTA_CLIENT_ID` | Client ID of your Okta application | Yes | +| `OKTA_CLIENT_SECRET` | Client Secret of your Okta application | Yes | +| `OKTA_AUDIENCE` | Expected audience claim in the token. Falls back to `OKTA_CLIENT_ID` if not set | No | +| `OKTA_JWKS_URL` | Explicit JWKS URL. If not set, derived from `OKTA_ISSUER` | No | +| `OKTA_API_TOKEN` | Okta API token for management operations | No | + +### Frontend Environment Variables + +| Variable | Description | Example | +|----------|-------------|---------| +| `AUTH_TYPE` | Set to `"OKTA"` to enable Okta authentication | `OKTA` | +| `OKTA_ISSUER` | The issuer URL for your Okta authorization server | `https://company.okta.com/oauth2/default` | +| `OKTA_CLIENT_ID` | Client ID of your Okta application | `0oa1bcdef2ghijklm3n4` | +| `OKTA_CLIENT_SECRET` | Client Secret of your Okta application | `abcd1234efgh5678` | + +## Okta Configuration + +### Creating an Okta Application + +1. Sign in to your Okta Admin Console +2. Navigate to **Applications** > **Applications** +3. Click **Create App Integration** +4. Select **OIDC - OpenID Connect** as the sign-in method +5. Select **Web Application** as the application type +6. Click **Next** + +### Application Settings + +1. **App integration name**: Enter a name for your application (e.g., "Keep") +2. 
**Sign-in redirect URIs**: Add your callback URL: `https://your-keep-domain.com/api/auth/callback/okta` +3. **Sign-out redirect URIs**: Add your sign-out URL: `https://your-keep-domain.com` +4. **Assignments**: Assign the application to the appropriate users or groups +5. Click **Save** +6. Copy the **Client ID** and **Client Secret** from the application settings + +### Role Mapping + +Keep extracts the user role from the JWT token. The role is determined in the following order: + +1. `keep_role` claim in the token +2. `role` claim in the token +3. First entry in the `groups` claim +4. Falls back to `user` role + +To configure role mapping, add a custom claim to your Okta authorization server: + +1. Navigate to **Security** > **API** > **Authorization Servers** +2. Select your authorization server (e.g., `default`) +3. Go to the **Claims** tab +4. Add a claim named `keep_role` or `groups` that maps to the user's Keep role diff --git a/docs/deployment/authentication/onelogin-auth.mdx b/docs/deployment/authentication/onelogin-auth.mdx new file mode 100644 index 0000000000..4d287a78b3 --- /dev/null +++ b/docs/deployment/authentication/onelogin-auth.mdx @@ -0,0 +1,64 @@ +--- +title: "OneLogin Authentication" +--- + +This document provides comprehensive information about the OneLogin integration in Keep + +## Overview + +Keep supports OneLogin as an authentication provider, enabling: +- Single Sign-On (SSO) via OneLogin +- OAuth2/OIDC authentication flow +- Token refresh capabilities +- Role-based access control through custom claims +- Session management through NextAuth.js + +## Environment Variables + +### Backend Environment Variables + +| Variable | Description | Example | +|----------|-------------|---------| +| `AUTH_TYPE` | Set to `"ONELOGIN"` to enable OneLogin authentication | `ONELOGIN` | +| `ONELOGIN_ISSUER` | The issuer URL for your OneLogin application | `https://company.onelogin.com/oidc/2` | +| `ONELOGIN_CLIENT_ID` | Client ID of your OneLogin 
application | `abc123def456ghi789` | +| `ONELOGIN_CLIENT_SECRET` | Client Secret of your OneLogin application | `abcd1234efgh5678ijkl9012` | +| `ONELOGIN_ADMIN_ROLE` | Role to be mapped to a keep admin role | `KeepAdmin` | +| `ONELOGIN_NOC_ROLE` | Role to be mapped to a keep noc role | `KeepNoc` | +| `ONELOGIN_WEBHOOK_ROLE` | Role to be mapped to a keep webhook role | `KeepWebhook` | +| `ONELOGIN_AUTO_CREATE_USER` | Whether to automatically create users in Keep if they do not exist | `True` | + +### Frontend Environment Variables + +| Variable | Description | Example | +|----------|-------------|---------| +| `AUTH_TYPE` | Set to `"ONELOGIN"` to enable OneLogin authentication | `ONELOGIN` | +| `ONELOGIN_ISSUER` | The issuer URL for your OneLogin application | `https://company.onelogin.com/oidc/2` | +| `ONELOGIN_CLIENT_ID` | Client ID of your OneLogin application | `abc123def456ghi789` | +| `ONELOGIN_CLIENT_SECRET` | Client Secret of your OneLogin application | `abcd1234efgh5678ijkl9012` | + +## OneLogin Configuration + +### Creating a OneLogin Application + +1. Sign in to your OneLogin Admin Console +2. Navigate to **Applications** +3. Click **Add App** +4. Search for **OpenId Connect (OIDC)** and select it +5. Click **Save** + +### Application Settings + +1. **Display Name**: Enter a name for your application (e.g., "Keep") +2. **Redirect URIs**: Enter your app's callback URL, e.g., `https://your-keep-domain.com/api/auth/callback/onelogin` +3. **Login URL**: Enter your app's login URL, e.g., `https://your-keep-domain.com/signin` +4. **Role Mapping**: + - Go to the Parameters tab + - Map the groups to user roles or groups with the default value being semicolon delimited input values +5. Go to the **SSO** tab and configure: + - **Application Type**: Web + - **Token Endpoint**: Client Secret Post +6. **Access**: + - Assign to appropriate roles or users +7. Click **Save** +8. 
Copy the client id, client secret and issuer URL from the SSO tab diff --git a/docs/deployment/authentication/overview.mdx b/docs/deployment/authentication/overview.mdx new file mode 100644 index 0000000000..b32703f599 --- /dev/null +++ b/docs/deployment/authentication/overview.mdx @@ -0,0 +1,54 @@ +--- +title: "Overview" +--- + +For every authentication-related question or issue, please join our [Slack](https://slack.keephq.dev). + +Keep supports various authentication providers and architectures to accommodate different deployment strategies and security needs, from development environments to production setups. + + +### Authentication Providers + +- [**No Authentication**](/deployment/authentication/no-auth) - Quick setup for testing or internal use cases. +- [**DB**](/deployment/authentication/db-auth) - Simple username/password authentication. Works well for small teams or for dev/stage environments. Users and hashed passwords are stored in the DB. +- [**Auth0**](/deployment/authentication/auth0-auth) - Utilize Auth0 for scalable, auth0-based authentication. +- [**Keycloak**](/deployment/authentication/keycloak-auth) - Utilize Keycloak for enterprise authentication methods such as SSO/SAML/OIDC, advanced RBAC with custom roles, resource-level permissions, and integration with user directories (LDAP). +- [**AzureAD**](/deployment/authentication/azuread-auth) - Utilize Azure AD for SSO/SAML/OIDC enterprise authentication. +- [**Okta**](/deployment/authentication/okta-auth) - Utilize Okta for SSO/OIDC authentication. +- [**OneLogin**](/deployment/authentication/onelogin-auth) - Utilize OneLogin for SSO/OIDC authentication. + +Choosing the right authentication strategy depends on your specific use case, security requirements, and deployment environment. You can read more about each authentication provider. 
+ + + +### Authentication Features Comparison + +| Identity Provider | RBAC | SAML/OIDC/SSO | LDAP | Resource-based permission | User Management | Group Management | On Prem | License | +|:---:|:----:|:---------:|:----:|:-------------------------:|:----------------:|:-----------------:|:-------:|:-------:| +| **No Auth** | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | **OSS** | +| **DB** | ✅
 (Predefined roles) | ❌ | ❌ | ✅ | ✅ | ❌ | ✅ | **OSS** | +| **Auth0** | ✅
 (Predefined roles) | ✅ | 🚧 | 🚧 | ✅ | 🚧 | ❌ | **EE** | +| **Keycloak** | ✅
(Custom roles) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | **EE** | +| **Oauth2Proxy** | ✅
 (Predefined roles) | ✅ | ❌ | ❌ | N/A | N/A | ✅ | **OSS** | +| **Azure AD** | ✅
 (Predefined roles) | ✅ | ❌ | ❌ | By Azure AD | By Azure AD | ✅ | **EE** | +| **Okta** | ✅
 (Predefined roles) | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | **OSS** | +| **OneLogin** | ✅
 (Predefined roles) | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | **OSS** | +### How To Configure + +Some authentication providers require additional environment variables. These will be covered in detail on the specific authentication provider pages. + +The authentication scheme on Keep is controlled with environment variables both on the backend (Keep API) and the frontend (Keep UI). + + +| Identity Provider | Environment Variable | Additional Variables Required | +| ------------------------------------- | -------------------------------------------------------------- | ---------------------------- | +| **No Auth** | `AUTH_TYPE=NOAUTH` | None | +| **DB** | `AUTH_TYPE=DB` | `KEEP_JWT_SECRET` | +| **Auth0** | `AUTH_TYPE=AUTH0` | `AUTH0_DOMAIN`, `AUTH0_CLIENT_ID`, `AUTH0_CLIENT_SECRET` | +| **Keycloak** | `AUTH_TYPE=KEYCLOAK` | `KEYCLOAK_URL`, `KEYCLOAK_REALM`, `KEYCLOAK_CLIENT_ID`, `KEYCLOAK_CLIENT_SECRET` | +| **Oauth2Proxy** | `AUTH_TYPE=OAUTH2PROXY` | `OAUTH2_PROXY_USER_HEADER`, `OAUTH2_PROXY_ROLE_HEADER`, `OAUTH2_PROXY_AUTO_CREATE_USER` | +| **AzureAD** | `AUTH_TYPE=AZUREAD` | See [AzureAD Configuration](/deployment/authentication/azuread-auth) | +| **Okta** | `AUTH_TYPE=OKTA` | `OKTA_DOMAIN`, `OKTA_CLIENT_ID`, `OKTA_CLIENT_SECRET` | +| **OneLogin** | `AUTH_TYPE=ONELOGIN` | See [OneLogin Configuration](/deployment/authentication/onelogin-auth) | + +For more details on each authentication strategy, including setup instructions and implications, refer to the respective sections. diff --git a/docs/deployment/configuration.mdx b/docs/deployment/configuration.mdx new file mode 100644 index 0000000000..1464c7eb81 --- /dev/null +++ b/docs/deployment/configuration.mdx @@ -0,0 +1,411 @@ +--- +title: "Configuration" +sidebarTitle: "Configuration" +--- + +## Background + +Keep is highly configurable through environment variables. This allows you to customize various aspects of both the backend and frontend components without modifying the code. 
Environment variables can be set in your deployment environment, such as in your Kubernetes configuration, Docker Compose file, or directly on your host system. + +## Backend Environment Variables + +### General + + + General configuration variables control the core behavior of the Keep server. + These settings determine fundamental aspects such as the server's host, port, + and whether certain components like the scheduler and consumer are enabled. + + +| Env var | Purpose | Required | Default Value | Valid options | +| :----------------------------------: | :---------------------------------------------------: | :------: | :----------------------------: | :--------------------------: | +| **KEEP_HOST** | Specifies the host for the Keep server | No | "0.0.0.0" | Valid hostname or IP address | +| **PORT** | Specifies the port on which the backend server runs | No | 8080 | Any valid port number | +| **SCHEDULER** | Enables or disables the workflow scheduler | No | "true" | "true" or "false" | +| **CONSUMER** | Enables or disables the consumer | No | "true" | "true" or "false" | +| **KEEP_VERSION** | Specifies the Keep version | No | "unknown" | Valid version string | +| **KEEP_API_URL** | Specifies the Keep API URL | No | Constructed from HOST and PORT | Valid URL | +| **KEEP_STORE_RAW_ALERTS** | Enables storing of raw alerts | No | "false" | "true" or "false" | +| **TENANT_CONFIGURATION_RELOAD_TIME** | Time in minutes to reload tenant configurations | No | 5 | Positive integer | +| **KEEP_LIVE_DEMO_MODE** | Keep will simulate incoming alerts and other activity | No | "false" | "true" or "false" | + +### Logging and Environment + + + Logging and environment configuration determines how Keep generates and + formats log output. These settings are crucial for debugging, monitoring, and + understanding the behavior of your Keep instance in different environments. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +| :------------------: | :-----------------------------------------------------: | :------: | :--------------: | :---------------------------------------------: | +| **LOG_LEVEL** | Sets the logging level for the application | No | "INFO" | "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" | +| **ENVIRONMENT** | Specifies the environment the application is running in | No | "production" | "development", "staging", "production" | +| **LOG_FORMAT** | Specifies the log format | No | "open_telemetry" | "open_telemetry", "dev_terminal" | +| **LOG_AUTH_PAYLOAD** | Enables logging of authentication payload | No | "false" | "true" or "false" | + +### Database + + + Database configuration is crucial for Keep's data persistence. Keep supports + various database backends through SQLAlchemy, allowing flexibility in choosing + and configuring your preferred database system. + + +| Env var | Purpose | Required | Default Value | Valid options | +| :----------------------------: | :-----------------------------------------------: | :------: | :-------------------------------: | :--------------------------------: | +| **DATABASE_CONNECTION_STRING** | Specifies the database connection URL | Yes | None | Valid SQLAlchemy connection string | +| **DATABASE_POOL_SIZE** | Sets the database connection pool size | No | 5 | Positive integer | +| **DATABASE_MAX_OVERFLOW** | Sets the maximum overflow for the connection pool | No | 10 | Positive integer | +| **DATABASE_ECHO** | Enables SQLAlchemy echo mode for debugging | No | False | Boolean (True/False) | +| **DB_CONNECTION_NAME** | Specifies the Cloud SQL connection name | No | "keephq-sandbox:us-central1:keep" | Valid Cloud SQL connection string | +| **DB_NAME** | Specifies the Cloud SQL database name | No | "keepdb" | Valid Cloud SQL database name | +| **DB_SERVICE_ACCOUNT** | Service account for database impersonation | No | None | Valid service account email | +| 
**DB_IP_TYPE** | Specifies the Cloud SQL IP type | No | "public" | "public", "private" or "psc" | +| **SKIP_DB_CREATION** | Skips database creation and migrations | No | "false" | "true" or "false" | + +### Resource Provisioning + + + Resource provisioning settings control how Keep sets up initial resources. + This configuration is particularly important for automating the setup process + and ensuring that necessary resources are available when Keep starts. + + + To elaborate on resource provisioning and its configuration, please see + [provisioning docs](/deployment/provision/overview). + + +| Env var | Purpose | Required | Default Value | Valid options | +| :---------------------: | :---------------------------------------: | :------: | :-----------: | :---------------: | +| **PROVISION_RESOURCES** | Enables or disables resource provisioning | No | "true" | "true" or "false" | + +### Authentication + + + Authentication configuration determines how Keep verifies user identities and + manages access control. These settings are essential for securing your Keep + instance and integrating with various authentication providers. + + + For specific authentication type configuration, please see [authentication + docs](/deployment/authentication/overview). 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +| :-----------------------------------: | :---------------------------------------------------------------: | :------: | :-----------: | :------------------------------------------------: | +| **AUTH_TYPE** | Specifies the authentication type | No | "NOAUTH" | "AUTH0", "KEYCLOAK", "DB", "NOAUTH", "OAUTH2PROXY", "OKTA", "ONELOGIN" | +| **KEEP_JWT_SECRET** | Secret key for JWT token generation and validation (DB auth only) | Yes | None | Any strong secret string | +| **KEEP_DEFAULT_USERNAME** | Default username for the admin user (DB auth only) | No | "keep" | Any valid username string | +| **KEEP_DEFAULT_PASSWORD** | Default password for the admin user (DB auth only) | No | "keep" | Any strong password string | +| **KEEP_FORCE_RESET_DEFAULT_PASSWORD** | Forces reset of default user password | No | "false" | "true" or "false" | +| **KEEP_DEFAULT_API_KEYS** | Comma-separated list of default API keys to provision | No | "" | Format: "name:role:secret,name:role:secret" | + +### Service Mesh (Internal Alert Ingestion) + + + These settings allow trusted services within the same Kubernetes cluster to + POST alerts to Keep without requiring a Keep API key. This is intended for + service-to-service communication where network-level authentication (e.g. + Istio mTLS with AuthorizationPolicy) ensures only authorized callers can + reach Keep's alert ingestion endpoints. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +| :------------------------------------------: | :--------------------------------------------------------------------------: | :------: | :-----------: | :-----------------: | +| **KEEP_ALLOW_MESH_ALERT_INGESTION** | Allows unauthenticated POST requests to `/alerts/event*` endpoints | No | "false" | "true" or "false" | + +When `KEEP_ALLOW_MESH_ALERT_INGESTION` is set to `"true"`, requests to `/alerts/event*` that do not carry an API key or bearer token are accepted and authenticated as an internal service with the `webhook` role. + +Calling services can optionally set the `X-Service-Name` HTTP header to identify themselves in Keep's logs and audit trail: + +```bash +curl -X POST http://keep-backend:8080/alerts/event \ + -H "Content-Type: application/json" \ + -H "X-Service-Name: my-service" \ + -d '[{"id":"alert-1","name":"Example Alert","severity":"info","status":"firing","source":["my-service"]}]' +``` + +The authenticated entity will have: +- **email**: `service:` (defaults to `service:unknown` if the header is not set) +- **role**: `webhook` (grants `write:alert` and `write:incident` scopes) + + + This feature bypasses API key authentication for the alert ingestion + endpoints. You **must** pair it with network-level access control (such as + Istio AuthorizationPolicy) to restrict which services can reach these + endpoints. Without network-level enforcement, any client that can reach + Keep's backend can POST alerts. + + +### Secrets Management + + + Secrets Management configuration specifies how Keep handles sensitive + information. This is crucial for securely storing and accessing confidential + data such as API keys and integrations credentials. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +| :--------------------------: | :-------------------------------------------------------------------: | :------: | :-----------: | :---------------------------: | +| **SECRET_MANAGER_TYPE** | Defines the type of secret manager to use | Yes | "FILE" | "FILE", "GCP", "K8S", "VAULT", "DB" | +| **SECRET_MANAGER_DIRECTORY** | Directory for storing secrets when using file-based secret management | No | "/state" | Any valid directory path | + +### OpenTelemetry + + + OpenTelemetry configuration enables comprehensive observability for Keep. + These settings allow you to integrate Keep with various monitoring and tracing + systems, enhancing your ability to debug and optimize performance. + + +| Env var | Purpose | Required | Default Value | Valid options | +| :-------------------------------------: | :-----------------------------------------: | :------: | :-----------: | :-----------------------: | +| **OTEL_SERVICE_NAME** | OpenTelemetry service name | No | "keep-api" | Valid service name string | +| **SERVICE_NAME** | Alternative for OTEL_SERVICE_NAME | No | "keep-api" | Valid service name string | +| **OTEL_EXPORTER_OTLP_ENDPOINT** | OpenTelemetry collector endpoint | No | None | Valid URL | +| **OTLP_ENDPOINT** | Alternative for OTEL_EXPORTER_OTLP_ENDPOINT | No | None | Valid URL | +| **OTEL_EXPORTER_OTLP_TRACES_ENDPOINT** | OpenTelemetry traces endpoint | No | None | Valid URL | +| **OTEL_EXPORTER_OTLP_LOGS_ENDPOINT** | OpenTelemetry logs endpoint | No | None | Valid URL | +| **OTEL_EXPORTER_OTLP_METRICS_ENDPOINT** | OpenTelemetry metrics endpoint | No | None | Valid URL | +| **CLOUD_TRACE_ENABLED** | Enables Google Cloud Trace exporter | No | "false" | "true" or "false" | +| **METRIC_OTEL_ENABLED** | Enables OpenTelemetry metrics | No | "" | "true" or "false" | + +### WebSocket Server (Pusher/Soketi) + + + WebSocket server configuration controls real-time communication capabilities + in Keep. 
These settings are important for enabling features that require + instant updates and notifications. + + +| Env var | Purpose | Required | Default Value | Valid options | +| :-------------------: | :-------------------------------: | :-------------------: | :-----------: | :--------------------------: | +| **PUSHER_DISABLED** | Disables Pusher integration | No | "false" | "true" or "false" | +| **PUSHER_HOST** | Hostname of the Pusher server | No | None | Valid hostname or IP address | +| **PUSHER_PORT** | Port of the Pusher server | No | None | Any valid port number | +| **PUSHER_APP_ID** | Pusher application ID | Yes (if using Pusher) | None | Valid Pusher App ID | +| **PUSHER_APP_KEY** | Pusher application key | Yes (if using Pusher) | None | Valid Pusher App Key | +| **PUSHER_APP_SECRET** | Pusher application secret | Yes (if using Pusher) | None | Valid Pusher App Secret | +| **PUSHER_USE_SSL** | Enables SSL for Pusher connection | No | False | Boolean (True/False) | +| **PUSHER_CLUSTER** | Pusher cluster | No | None | Valid Pusher cluster name | + +### OpenAI + + + OpenAI configuration is used for integrating with OpenAI services. These + settings are important if you're utilizing OpenAI capabilities within Keep for + tasks such as natural language processing or AI-assisted operations. + + +| Env var | Purpose | Required | Default Value | Valid options | Backend/Frontend | +| :-------------------------: | :------------------------------------------------: | :------: | :-----------------: | :----------------------------------------------------------: | :--------------: | +| **OPENAI_API_KEY** | API key for OpenAI services | No | None | Valid OpenAI API key | Both | +| **OPENAI_MODEL_NAME** | Model name to use for OpenAI requests | No | "gpt-4o-2024-08-06" | Valid OpenAI model name (e.g., "gpt-4o", "gpt-4o-mini", ...) 
| Both | +| **OPEN_AI_ORGANIZATION_ID** | Organization ID for OpenAI services | No | None | Valid OpenAI organization ID | Both | +| **OPENAI_BASE_URL** | Base URL for OpenAI API (useful for LiteLLM proxy) | No | None | Valid URL (e.g., "http://localhost:4000") | Both | + + + For various different LLM based features, we also require to set these + environment variables for Keep's frontend too. + + +### Posthog + + + Posthog configuration controls Keep's integration with the Posthog analytics + platform. These settings are useful for tracking usage patterns and gathering + insights about how your Keep instance is being used. + + +| Env var | Purpose | Required | Default Value | Valid options | +| :------------------: | :---------------------------: | :------: | :-----------------------------------------------: | :-------------------: | +| **POSTHOG_API_KEY** | API key for PostHog analytics | No | "phc_muk9qE3TfZsX3SZ9XxX52kCGJBclrjhkP9JxAQcm1PZ" | Valid PostHog API key | +| **POSTHOG_DISABLED** | Disables PostHog integration | No | "false" | "true" or "false" | + +### Sentry + + + Sentry configuration controls Keep's integration with Sentry for error + monitoring and reporting. These settings are important for maintaining the + stability and reliability of your Keep instance. + + +| Env var | Purpose | Required | Default Value | Valid options | +| :-----------------: | :-------------------------: | :------: | :-----------: | :---------------: | +| **SENTRY_DISABLED** | Disables Sentry integration | No | "false" | "true" or "false" | + +### Ngrok + + + Ngrok configuration enables secure tunneling to your Keep instance. These + settings are particularly useful for development or when you need to expose + your local Keep instance to the internet securely. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +| :------------------: | :----------------------------: | :------: | :-----------: | :--------------------: | +| **USE_NGROK** | Enables ngrok for tunneling | No | "false" | "true" or "false" | +| **NGROK_AUTH_TOKEN** | Authentication token for ngrok | No | None | Valid ngrok auth token | +| **NGROK_DOMAIN** | Custom domain for ngrok | No | None | Valid domain name | + +### Elasticsearch + + + Elasticsearch configuration controls Keep's integration with Elasticsearch for + advanced search capabilities. These settings are important if you're using + Elasticsearch to enhance Keep's search functionality and performance. + + +| Env var | Purpose | Required | Default Value | Valid options | +| :----------------------: | :-----------------------------------------: | :--------------------------: | :-----------: | :---------------------------: | +| **ELASTIC_ENABLED** | Enables Elasticsearch integration | No | "false" | "true" or "false" | +| **ELASTIC_API_KEY** | API key for Elasticsearch | Yes (if using Elasticsearch) | None | Valid Elasticsearch API key | +| **ELASTIC_HOSTS** | Comma-separated list of Elasticsearch hosts | Yes (if using Elasticsearch) | None | Valid Elasticsearch host URLs | +| **ELASTIC_USER** | Username for Elasticsearch basic auth | No | None | Valid username | +| **ELASTIC_PASSWORD** | Password for Elasticsearch basic auth | No | None | Valid password | +| **ELASTIC_INDEX_SUFFIX** | Suffix for Elasticsearch index names | Yes (for single tenant) | None | Any valid string | + +### Redis + + + Redis configuration specifies the connection details for Keep's Redis + instance. Redis is used for various caching and queueing purposes, making + these settings important for optimizing Keep's performance and scalability. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +| :----------------: | :-------------------: | :------: | :-----------: | :--------------------------: | +| **REDIS** | Redis enabled | No | false | true or false | +| **REDIS_HOST** | Redis server hostname | No | "localhost" | Valid hostname or IP address | +| **REDIS_PORT** | Redis server port | No | 6379 | Valid port number | +| **REDIS_USERNAME** | Redis username | No | None | Valid username string | +| **REDIS_PASSWORD** | Redis password | No | None | Valid password string | + +### Redis Sentinel + + Redis sentinel configuration specifies the connection details for Keep's Redis sentinel + instance. Redis sentinel is used when you have a redis cluster and it acts as a broker. + + +| Env var | Purpose | Required | Default Value | Valid options | +| :---------------------------------: | :----------------------: | :------: | :-----------------: | :-----------------------------------------: | +| **REDIS** | Redis enabled | No | false | true or false | +| **REDIS_SENTINEL_HOSTS** | Redis sentinel server(s) | No | "localhost:26379" | "host1:port1,host2:port2" (comma-separated) | +| **REDIS_SENTINEL_SERVICE_NAME** | Redis sentinel service name | No | "mymaster" | Valid service name string | +| **REDIS_USERNAME** | Redis username | No | None | Valid username string | +| **REDIS_PASSWORD** | Redis password | No | None | Valid password string | + + +### ARQ + + + ARQ (Asynchronous Task Queue) configuration controls Keep's background task + processing. These settings are crucial for managing how Keep handles + long-running or scheduled tasks, ensuring efficient resource utilization and + responsiveness. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +| :--------------------------: | :-------------------------------------------------: | :------: | :-----------: | :------------------: | +| **ARQ_BACKGROUND_FUNCTIONS** | Comma-separated list of background functions to run | No | None | Valid function names | +| **ARQ_KEEP_RESULT** | Duration to keep job results (in seconds) | No | 3600 | Positive integer | +| **ARQ_EXPIRES** | Default job expiration time (in seconds) | No | 3600 | Positive integer | +| **ARQ_EXPIRES_AI** | AI job expiration time (in seconds) | No | 3600000 | Positive integer | + +### Rate Limiting + + + Rate limiting configuration controls how many requests can be made to Keep's + API endpoints within a specified time period. This helps prevent abuse and + ensures system stability. + + +| Env var | Purpose | Required | Default Value | Valid options | +| :------------------------: | :-----------------------------------: | :------: | :-----------: | :-----------------------------------------------------------------------------------: | +| **KEEP_USE_LIMITER** | Enables or disables rate limiting | No | "false" | "true" or "false" | +| **KEEP_LIMIT_CONCURRENCY** | Sets the rate limit for API endpoints | No | "100/minute" | Format: "{number}/{interval}" where interval can be "second", "minute", "hour", "day" | + + +Currently, rate limiting is applied to the following endpoints: +- POST `/alerts/event` - Generic event ingestion endpoint +- POST `/alerts/{provider_type}` - Provider-specific event ingestion endpoints + +These endpoints are rate-limited according to the `KEEP_LIMIT_CONCURRENCY` setting when `KEEP_USE_LIMITER` is enabled. + + + + +### Maintenance Windows + + + The strategy enables the ability to manage how the alerts are handled + in case of a match with the Maintenance Windows Rules. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +| :------------------------------: | :-----------------------------------------: | :---------------------: | :-----------: | :-----------------------------------------------: | +| **MAINTENANCE_WINDOW_STRATEGY** | Choose the strategy | No | "default" | "default" or "recover_previous_status" | +| **WATCHER_LAPSED_TIME** | Time in seconds to execute the alert review | No | 60 | Valid positive integer | + +## Frontend Environment Variables + + + Frontend configuration variables control the behavior and features of Keep's + user interface. These settings are crucial for customizing the frontend's + appearance, functionality, and integration with the backend services. + + +### General + +| Env var | Purpose | Required | Default Value | Valid options | +| ---------------------------------- | ------------------------------------------------------------------- | -------- | ------------- | --------------- | +| **API_URL** | Specifies the URL of the Keep backend API | Yes | None | Valid URL | +| **AUTH_SESSION_TIMEOUT** | Specifies user session timeout in seconds. Default is 30 days. 
| No | 2592000 | Value in seconds| +| **KEEP_HIDE_SENSITIVE_FIELDS** | Hides sensitive fields | No | None | "true", "false" | +| **HIDE_NAVBAR_CORRELATION** | Hides the correlation page from the navigation bar in the UI | No | None | "true" | +| **HIDE_NAVBAR_WORKFLOWS** | Hides the workflows page from the navigation bar in the UI | No | None | "true" | +| **HIDE_NAVBAR_SERVICE_TOPOLOGY** | Hides the service topology page from the navigation bar in the UI | No | None | "true" | +| **HIDE_NAVBAR_MAPPING** | Hides the mapping page from the navigation bar in the UI | No | None | "true" | +| **HIDE_NAVBAR_EXTRACTION** | Hides the extraction page from the navigation bar in the UI | No | None | "true" | +| **HIDE_NAVBAR_MAINTENANCE_WINDOW** | Hides the maintenance window page from the navigation bar in the UI | No | None | "true" | +| **HIDE_NAVBAR_AI_PLUGINS** | Hides the AI plugins page from the navigation bar in the UI | No | None | "true" | +| **KEEP_WF_LIST_EXTENDED_INFO** | Use a list instead a button to show the complete execution list | No | "true" | "true", "false" | + +### Authentication + + + Authentication configuration determines how Keep verifies user identities and + manages access control. These settings are essential for securing your Keep + instance and integrating with various authentication providers. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +| :-----------------: | :-------------------------------: | :------: | :-----------: | :------------------------------------------------: | +| **AUTH_TYPE** | Specifies the authentication type | No | "NOAUTH" | "AUTH0", "KEYCLOAK", "DB", "NOAUTH", "OAUTH2PROXY", "OKTA", "ONELOGIN" | +| **NEXTAUTH_URL** | URL for NextAuth authentication | Yes | None | Valid URL | +| **NEXTAUTH_SECRET** | Secret key for NextAuth | Yes | None | Strong secret string | + +### Posthog + +| Env var | Purpose | Required | Default Value | Valid options | +| :--------------: | :------------------------------------: | :------: | :-----------: | :-------------------: | +| **POSTHOG_KEY** | PostHog API key for frontend analytics | No | None | Valid PostHog API key | +| **POSTHOG_HOST** | PostHog Host for frontend analytics | No | None | Valid PostHog Host | + +### Pusher + + + Pusher configuration is essential for enabling real-time updates and + communication in Keep's frontend. These settings allow the frontend to + establish a WebSocket connection with the Pusher server, facilitating instant + updates and notifications. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +| :-----------------: | :---------------------------: | :---------------------: | :-----------: | :--------------------------: | +| **PUSHER_DISABLED** | Disables Pusher integration | No | "false" | "true" or "false" | +| **PUSHER_HOST** | Hostname of the Pusher server | No | "localhost" | Valid hostname or IP address | +| **PUSHER_PORT** | Port of the Pusher server | No | 6001 | Valid port number | +| **PUSHER_APP_KEY** | Pusher application key | Yes (if Pusher enabled) | "keepappkey" | Valid Pusher App Key | +| **PUSHER_CLUSTER** | Pusher cluster | No | None | Valid Pusher cluster name | diff --git a/docs/deployment/docker.mdx b/docs/deployment/docker.mdx index 5b77e205ce..0f32e5e425 100644 --- a/docs/deployment/docker.mdx +++ b/docs/deployment/docker.mdx @@ -29,3 +29,38 @@ The docker-compose.yml contains 3 services: - [keep-backend](https://console.cloud.google.com/artifacts/docker/keephq/us-central1/keep/keep-api?project=keephq) - a fastapi service that as the API server. - [keep-frontend](https://console.cloud.google.com/artifacts/docker/keephq/us-central1/keep/keep-ui?project=keephq) - a nextjs app that serves as Keep UI interface. - [keep-websocket-server](https://docs.soketi.app/getting-started/installation/docker) - Soketi (a pusher compatible websocket server) for real time alerting. + +### Reinstall Keep with the option to refresh from scratch + +`Caution:` This usage context will refresh from the beginning and Keep's data and settings will be erased. Even other containers on this host are also erased. So please consider when using the steps below. + +For cases where you need to test many different options or simply want to reinstall Keep from scratch using docker compose without spending a lot of time, that is, without repeating the steps of installing docker, downloading the installer.. .. run the commands according to the previous instructions. 
+ +Follow these steps + +#### Step1: Stop, Clear container, network, volume, image. +In the directory containing the docker compose file you downloaded, say `/root/` + +``` +docker-compose down + +docker-compose down --rmi all + +docker-compose down -v + +docker system prune -a --volumes +``` + +#### Step2: Clear Config db, config file in state folder. + +``` +rm -rf state/* + +``` + +#### Step 3: Run again + +``` +docker compose up -d +``` + diff --git a/docs/deployment/ecs.mdx b/docs/deployment/ecs.mdx index 097092e1a7..61289afcdd 100644 --- a/docs/deployment/ecs.mdx +++ b/docs/deployment/ecs.mdx @@ -102,7 +102,7 @@ sidebarTitle: "AWS ECS" - Configuration Type: Configure at task definition creation - Volume type: EFS - Storage configurations: - - File system ID: Select an exisiting EFS filesystem or create a new one + - File system ID: Select an existing EFS filesystem or create a new one - Root Directory: / ![Volume Configuration](/images/ecs-task-def-backend5.png) - Container mount points: diff --git a/docs/deployment/kubernetes.mdx b/docs/deployment/kubernetes.mdx deleted file mode 100644 index 07119266e4..0000000000 --- a/docs/deployment/kubernetes.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Kubernetes" -sidebarTitle: "Kubernetes" ---- - -Keep can be installed via Helm Chart. 
-
-First, add the Helm repository of Keep and pull the latest version of the chart:
-```
-helm repo add keephq https://keephq.github.io/helm-charts
-helm pull keephq/keep
-```
-
-Next, install using:
-```
-helm install keep keephq/keep
-```
-
-Notice for it to work locally, you'll need this port forwarding:
-```
-kubectl port-forward svc/keep-frontend 3000:3000
-kubectl port-forward svc/keep-backend 8080:8080
-```
-
-To learn more about Keep's helm chart, see https://github.com/keephq/helm-charts/blob/main/README.md
-
-To discover about how to configure Keep using Helm, see auto generated helm-docs at https://github.com/keephq/helm-charts/blob/main/charts/keep/README.md
diff --git a/docs/deployment/kubernetes/architecture.mdx b/docs/deployment/kubernetes/architecture.mdx
new file mode 100644
index 0000000000..5d10512701
--- /dev/null
+++ b/docs/deployment/kubernetes/architecture.mdx
@@ -0,0 +1,132 @@
+---
+title: "Architecture"
+sidebarTitle: "Architecture"
+---
+
+
+## High Level Architecture
+Keep architecture consists of four main components:
+
+1. **Keep API** - A FastAPI-based backend server that handles business logic and API endpoints.
+2. **Keep Frontend** - A Next.js-based frontend interface for user interaction.
+3. **Websocket Server** - A Soketi server for real-time updates without page refreshes.
+4. **Database Server** - A database used to store and manage persistent data. Supported databases include SQLite, PostgreSQL, MySQL, and SQL Server.
+
+## Kubernetes Architecture
+
+Keep uses a single unified NGINX ingress controller to route traffic to all components (frontend, backend, and websocket). 
The ingress handles path-based routing:
+
+By default:
+- `/` routed to **Frontend** (configurable via `global.ingress.frontendPrefix`)
+- `/v2` routed to **Backend** (configurable via `global.ingress.backendPrefix`)
+- `/websocket` routed to **WebSocket** (configurable via `global.ingress.websocketPrefix`)
+
+### General Components
+
+Keep uses the Kubernetes secret manager to store secrets such as integration credentials.
+
+| Kubernetes Resource | Purpose | Required/Optional | Source |
+|:-------------------:|:-------:|:-----------------:|:------:|
+| ServiceAccount | Provides an identity for processes that run in a Pod. Used mainly for Keep API to access the Kubernetes secret manager | Required | [serviceaccount.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/serviceaccount.yaml) |
+| Role | Defines permissions for the ServiceAccount to manage secrets | Required | [role-secret-manager.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/role-secret-manager.yaml) |
+| RoleBinding | Associates the Role with the ServiceAccount | Required | [role-binding-secret-manager.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/role-binding-secret-manager.yaml) |
+| Secret Deletion Job | Cleans up Keep-related secrets when the Helm release is deleted | Required | [delete-secret-job.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/delete-secret-job.yaml) |
+
+### Ingress Component
+| Kubernetes Resource | Purpose | Required/Optional | Source |
+|:-------------------:|:-------:|:-----------------:|:------:|
+| Shared NGINX Ingress | Routes all external traffic via one entry point | Optional | [nginx-ingress.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/nginx-ingress.yaml) |
+
+### Frontend Components
+
+| Kubernetes Resource | Purpose | Required/Optional | Source |
+|:-------------------:|:-------:|:-----------------:|:------:|
+| Frontend Deployment | 
Manages the frontend application containers | Required | [frontend.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/frontend.yaml) | +| Frontend Service | Exposes the frontend deployment within the cluster | Required | [frontend-service.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/frontend-service.yaml) | +| Frontend Route (OpenShift) | Exposes the frontend service to external traffic on OpenShift | Optional | [frontend-route.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/frontend-route.yaml) | +| Frontend HorizontalPodAutoscaler | Automatically scales the number of frontend pods | Optional | [frontend-hpa.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/frontend-hpa.yaml) | + +#### Backend Components + +| Kubernetes Resource | Purpose | Required/Optional | Source | +|:-------------------:|:-------:|:-----------------:|:------:| +| Backend Deployment | Manages the backend application containers | Required (if backend enabled) | [backend.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/backend.yaml) | +| Backend Service | Exposes the backend deployment within the cluster | Required (if backend enabled) | [backend-service.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/backend-service.yaml) | +| Backend Route (OpenShift) | Exposes the backend service to external traffic on OpenShift | Optional | [backend-route.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/backend-route.yaml) | +| Backend HorizontalPodAutoscaler | Automatically scales the number of backend pods | Optional | [backend-hpa.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/backend-hpa.yaml) | + +#### Database Components +Database components are optional. You can spin up Keep with your own database. 
+ +| Kubernetes Resource | Purpose | Required/Optional | Source | +|:-------------------:|:-------:|:-----------------:|:------:| +| Database Deployment | Manages the database containers (e.g. MySQL or Postgres) | Optional | [db.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/db.yaml) | +| Database Service | Exposes the database deployment within the cluster | Required (if deployment enabled) | [db-service.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/db-service.yaml) | +| Database PersistentVolume | Provides persistent storage for the database | Optional | [db-pv.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/db-pv.yaml) | +| Database PersistentVolumeClaim | Claims the persistent storage for the database | Optional | [db-pvc.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/db-pvc.yaml) | + +#### WebSocket Components +WebSocket components are optional. You can spin up Keep with your own *Pusher compatible* WebSocket server. 
+
+
+| Kubernetes Resource | Purpose | Required/Optional | Source |
+|:-------------------:|:-------:|:-----------------:|:------:|
+| WebSocket Deployment | Manages the WebSocket server containers (Soketi) | Optional | [websocket-server.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/websocket-server.yaml) |
+| WebSocket Service | Exposes the WebSocket deployment within the cluster | Required (if WebSocket enabled) | [websocket-server-service.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/websocket-server-service.yaml) |
+| WebSocket Route (OpenShift) | Exposes the WebSocket service to external traffic on OpenShift | Optional | [websocket-server-route.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/websocket-server-route.yaml) |
+| WebSocket HorizontalPodAutoscaler | Automatically scales the number of WebSocket server pods | Optional | [websocket-server-hpa.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/websocket-server-hpa.yaml) |
+
+These tables provide a comprehensive overview of the Kubernetes resources used in the Keep architecture, organized by component type. Each table describes the purpose of each resource, indicates whether it's required or optional, and provides a direct link to the source template in the Keep Helm charts GitHub repository.
+
+### Kubernetes Configuration
+This section covers only Kubernetes-specific configuration. To learn about Keep-specific configuration, controlled by environment variables, see [Keep Configuration](/deployment/configuration).
+
+Each of these components can be customized via the `values.yaml` file in the Helm chart.
+
+
+Below are key configurations that can be adjusted for each component.
+
+#### 1. Frontend Configuration
+```yaml
+frontend:
+  enabled: true # Enable or disable the frontend deployment.
+  replicaCount: 1 # Number of frontend replicas. 
+  image:
+    repository: us-central1-docker.pkg.dev/keephq/keep/keep-ui
+    pullPolicy: Always # Image pull policy (Always, IfNotPresent).
+    tag: latest
+  serviceAccount:
+    create: true # Create a new service account.
+    name: "" # Service account name (empty for default).
+  podAnnotations: {} # Annotations for frontend pods.
+  podSecurityContext: {} # Security context for the frontend pods.
+  securityContext: {} # Security context for the containers.
+  service:
+    type: ClusterIP # Service type (ClusterIP, NodePort, LoadBalancer).
+    port: 3000 # Port on which the frontend service is exposed.
+```
+
+#### 2. Backend Configuration
+```yaml
+backend:
+  enabled: true # Enable or disable the backend deployment.
+  replicaCount: 1 # Number of backend replicas.
+  image:
+    repository: us-central1-docker.pkg.dev/keephq/keep/keep-api
+    pullPolicy: Always # Image pull policy (Always, IfNotPresent).
+  serviceAccount:
+    create: true # Create a new service account.
+    name: "" # Service account name (empty for default).
+  podAnnotations: {} # Annotations for backend pods.
+  podSecurityContext: {} # Security context for backend pods.
+  securityContext: {} # Security context for containers.
+  service:
+    type: ClusterIP # Service type (ClusterIP, NodePort, LoadBalancer).
+    port: 8080 # Port on which the backend API is exposed.
+```
+
+#### 3. WebSocket Server Configuration
+Keep uses Soketi as its websocket server. To learn how to configure it, please see [Soketi docs](https://github.com/soketi/charts/tree/master/charts/soketi).
+
+
+#### 4. Database Configuration
+Keep supports plenty of databases (e.g. PostgreSQL, MySQL, SQLite, etc.). It is out of scope to describe here how to deploy all of them to k8s. If you have specific questions - [contact us](https://slack.keephq.dev) and we will be happy to help. 
diff --git a/docs/deployment/kubernetes/installation.mdx b/docs/deployment/kubernetes/installation.mdx new file mode 100644 index 0000000000..870a84560e --- /dev/null +++ b/docs/deployment/kubernetes/installation.mdx @@ -0,0 +1,221 @@ +--- +title: "Installation" +sidebarTitle: "Installation" +--- + + +The recommended way to install Keep on Kubernetes is via Helm Chart.

+Follow these steps to set it up. +
+ +# Prerequisites + +## Helm CLI +See the [Helm documentation](https://helm.sh/docs/intro/install/) for instructions about installing helm. + +## Ingress Controller (Optional) + +You can skip this step if: +1. You already have **ingress-nginx** installed. +2. You don't need to expose Keep to the internet/network. + + +### Overview +An ingress controller is essential for managing external access to services in your Kubernetes cluster. It acts as a smart router and load balancer, allowing you to expose multiple services through a single entry point while handling SSL termination and routing rules. + + + +**Keep works best with both** [ingress-nginx](https://github.com/kubernetes/ingress-nginx) **and** [HAProxy Ingress](https://haproxy-ingress.github.io/) **controllers, but you can customize the helm chart for other ingress controllers too.** + + +### Nginx Ingress Controller + +#### Check ingress-nginx Installed +You check if you already have ingress-nginx installed: +```bash +# By default, the ingress-nginx will be installed under the ingress-nginx namespace +kubectl -n ingress-nginx get pods +NAME READY STATUS RESTARTS AGE +ingress-nginx-controller-d49697d5f-hjhbj 1/1 Running 0 4h19m + +# Or check for the ingress class +kubectl get ingressclass +NAME CONTROLLER PARAMETERS AGE +nginx k8s.io/ingress-nginx 4h19m + +``` + +#### Install ingress-nginx + +To read about more installation options, see [ingress-nginx installation docs](https://kubernetes.github.io/ingress-nginx/deploy/). 
+ + +Since ingress-nginx 4.12, you'll need to add +``` +--set controller.config.annotations-risk-level=Critical +``` +See https://github.com/kubernetes/ingress-nginx/issues/12618#issuecomment-2566084202 + +```bash +# simplest way to install +# we set snippet-annotations to true to allow rewrites +# see https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#allow-snippet-annotations +helm upgrade --install ingress-nginx ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --set controller.config.allow-snippet-annotations=true \ + --set controller.config.annotations-risk-level=Critical \ + --namespace ingress-nginx --create-namespace +``` + +Verify installation: +```bash +kubectl get ingressclass +NAME CONTROLLER PARAMETERS AGE +nginx k8s.io/ingress-nginx 4h19m +``` + +Verify if snippet annotations are enabled: +```bash +kubectl get configmap -n ingress-nginx ingress-nginx-controller -o yaml | grep allow-snippet-annotations +allow-snippet-annotations: "true" +``` + +### HAProxy Ingress Controller + +#### Install ingress-haproxy + +To read about more installation options, see [haproxy-ingress installation docs](https://haproxy-ingress.github.io/docs/getting-started/). 
+ + +```bash +# simplest way to install +helm upgrade --install haproxy-ingress haproxy-ingress \ + --repo https://haproxy-ingress.github.io/charts \ + --namespace ingress-haproxy --create-namespace +``` + +Verify installation: +```bash +kubectl get ingressclass +NAME CONTROLLER PARAMETERS AGE +haproxy haproxy-ingress.github.io/controller 4h19m +``` + +Verify if controller is running: +```bash +kubectl get pods -n ingress-haproxy -l app.kubernetes.io/instance=haproxy-ingress +NAME READY STATUS RESTARTS AGE +haproxy-ingress-controller-x4n2z 1/1 Running 0 4h19m +``` + +## Installation + +### With Ingress-NGINX (Recommended) + +```bash +# Add the Helm repository +helm repo add keephq https://keephq.github.io/helm-charts + +# Install Keep with ingress enabled +helm install keep keephq/keep -n keep --create-namespace +``` + +### With Ingress-HAProxy (Recommended) + +```bash +# Add the Helm repository +helm repo add keephq https://keephq.github.io/helm-charts + +# Install Keep with ingress enabled +helm install keep keephq/keep -n keep --create-namespace --set global.ingress.className=haproxy +``` + +### Without Ingress (Not Recommended) + +```bash +# Add the Helm repository +helm repo add keephq https://keephq.github.io/helm-charts + +# Install Keep without ingress enabled. +# You won't be able to access Keep from the network. +helm install keep keephq/keep -n keep --create-namespace \ + --set global.ingress.enabled=false +``` + +## Accessing Keep + +### Ingress +If you installed Keep with ingress, you should be able to access Keep. 
+ +```bash +kubectl -n keep get ingress +NAME CLASS HOSTS ADDRESS PORTS AGE +keep-ingress nginx * X.X.X.X 80 4h16m +``` + +Keep is available at http://X.X.X.X :) + +### Without Ingress (Port-Forwarding) + +Use the following commands to access Keep locally without ingress: +```bash +# Forward the UI +kubectl port-forward svc/keep-frontend 3000:3000 -n keep & + +# Forward the Backend +kubectl port-forward svc/keep-backend 8080:8080 -n keep & + +# Forward WebSocket server (optional) +kubectl port-forward svc/keep-websocket 6001:6001 -n keep & +``` + +Keep is available at http://localhost:3000 :) + +## Configuring HTTPS + +### Prerequisites +1. Domain Name: Example - keep.yourcompany.com +2. TLS Certificate: Private key (tls.key) and certificate (tls.crt) + +### Create the TLS Secret + +Assuming: +- `tls.crt` contains the certificate. +- `tls.key` contains the private key. + +```bash +# create the secret with kubectl +kubectl create secret tls keep-tls --cert=./tls.crt --key=./tls.key -n keep +``` + +### Update Helm Values for TLS +```bash +helm upgrade -n keep keep keephq/keep \ + --set "global.ingress.hosts[0].host=keep.example.com" \ + --set "global.ingress.tls[0].hosts[0]=keep.example.com" \ + --set "global.ingress.tls[0].secretName=keep-tls" +``` + + + +Alternatively, update your `values.yaml`: +```bash +... +global: + ingress: + hosts: + - host: keep.example.com + tls: + - hosts: + - keep.example.com + secretName: keep-tls +... 
+``` + + +## Uninstallation +To remove Keep and clean up: +```bash +helm uninstall keep -n keep +kubectl delete namespace keep +``` diff --git a/docs/deployment/openshift.mdx b/docs/deployment/kubernetes/openshift.mdx similarity index 100% rename from docs/deployment/openshift.mdx rename to docs/deployment/kubernetes/openshift.mdx diff --git a/docs/deployment/kubernetes/overview.mdx b/docs/deployment/kubernetes/overview.mdx new file mode 100644 index 0000000000..9b891c3d7f --- /dev/null +++ b/docs/deployment/kubernetes/overview.mdx @@ -0,0 +1,19 @@ +--- +title: "Overview" +sidebarTitle: "Overview" +--- + + If you need help deploying Keep on Kubernetes or have any feedback or suggestions, feel free to open a ticket in our [GitHub repo](https://github.com/keephq/keep) or say hello in our [Slack](https://slack.keephq.dev). + + +Keep is designed as a Kubernetes-native application. + +We maintain an opinionated, batteries-included Helm chart, but you can customize it as needed. + + +## Next steps +- Install Keep on [Kubernetes](/deployment/kubernetes/installation). +- Keep's [Helm Chart](https://github.com/keephq/helm-charts). +- Keep with [Kubernetes Secret Manager](/deployment/secret-store#kubernetes-secret-manager) +- Deep dive to Keep's kubernetes [Architecture](/deployment/kubernetes/architecture). +- Install Keep on [OpenShift](/deployment/kubernetes/openshift). diff --git a/docs/deployment/local-llm/keep-with-litellm.mdx b/docs/deployment/local-llm/keep-with-litellm.mdx new file mode 100644 index 0000000000..12f3923577 --- /dev/null +++ b/docs/deployment/local-llm/keep-with-litellm.mdx @@ -0,0 +1,65 @@ +--- +title: "Running Keep with LiteLLM" +--- + + + This guide is for users who want to run Keep with locally hosted LLM models. + If you encounter any issues, please talk to us at our (Slack + community)[https://slack.keephq.dev]. + + +## Overview + +This guide will help you set up Keep with LiteLLM, a versatile tool that supports over 100 LLM providers. 
LiteLLM acts as a proxy that adheres to OpenAI standards, allowing seamless integration with Keep. By following this guide, you can easily configure Keep to work with various LLM providers using LiteLLM. + +### Motivation + +Incorporating LiteLLM with Keep allows organizations to run local models in on-premises and air-gapped environments. This setup is particularly beneficial for leveraging AIOps capabilities while ensuring that sensitive data does not leave the premises. By using LiteLLM as a proxy, you can seamlessly integrate with Keep and access a wide range of LLM providers without compromising data security. This approach is ideal for organizations that prioritize data privacy and need to comply with strict regulatory requirements. + +## Prerequisites + +### Running LiteLLM locally + +1. Ensure you have Python and pip installed on your system. +2. Install LiteLLM by running the following command: + +```bash +pip install litellm +``` + +3. Start LiteLLM with your desired model. For example, to use the HuggingFace model: + +```bash +litellm --model huggingface/bigcode/starcoder +``` + +This will start the proxy server on `http://0.0.0.0:4000`. + +### Running LiteLLM with Docker + +To run LiteLLM using Docker, you can use the following command: + +```bash +docker run -p 4000:4000 litellm/litellm --model huggingface/bigcode/starcoder +``` + +This command will start the LiteLLM proxy in a Docker container, exposing it on port 4000. 
+ +## Configuration + +| Env var | Purpose | Required | Default Value | Valid options | +| :-------------------------: | :-----------------------------------------: | :------: | :-----------: | :---------------------------------------: | +| **OPEN_AI_ORGANIZATION_ID** | Organization ID for OpenAI/LiteLLM services | Yes | None | Valid organization ID string | +| **OPEN_AI_API_KEY** | API key for OpenAI/LiteLLM services | Yes | None | Valid API key string | +| **OPENAI_BASE_URL** | Base URL for the LiteLLM proxy | Yes | None | Valid URL (e.g., "http://localhost:4000") | + + + These environment variables should be set on both Keep **frontend** and + **backend**. + + +## Additional Resources + +- [LiteLLM Documentation](https://docs.litellm.ai/) + +By following these steps, you can leverage the power of multiple LLM providers with Keep, using LiteLLM as a flexible and powerful proxy. diff --git a/docs/deployment/monitoring.mdx b/docs/deployment/monitoring.mdx new file mode 100644 index 0000000000..9800908a7a --- /dev/null +++ b/docs/deployment/monitoring.mdx @@ -0,0 +1,22 @@ +--- +title: "Monitoring" +sidebarTitle: "Monitoring" +--- + +# Healthchecks + +Keep's Backend healthcheck url: +``` +{BACKEND_API_URL}/healthcheck +``` + +Keep's Frontend healthcheck url: +``` +{FRONTEND_URL}/api/healthcheck +``` + +# Prometheus Metrics + +(TBD) + +> Please note that /api/metrics are not designed for production instance's health monitoring, but for usage monitoring by a specific tenant. diff --git a/docs/deployment/provision/dashboard.mdx b/docs/deployment/provision/dashboard.mdx new file mode 100644 index 0000000000..fcb950409a --- /dev/null +++ b/docs/deployment/provision/dashboard.mdx @@ -0,0 +1,83 @@ +--- +title: "Dashboard Provisioning" +--- + +Provisioning dashboards in Keep allows you to configure and manage visual representations of your data. This section will guide you through the steps required to set up and provision dashboards. 
+ +### Dashboard Provisioning Overview + +Dashboards in Keep are configured using JSON strings that define the layout, data sources, and visual components. These configurations can be managed through environment variables or configuration files. + +### Environment Variables + +To provision dashboards, you need to set the following environment variable: + +| Environment Variable | Purpose | +| -------------------- | ----------------------------------------------- | +| `KEEP_DASHBOARDS` | JSON string containing dashboard configurations | + +### Example Configuration + +Here is an example of how to set the `KEEP_DASHBOARDS` environment variable (dumped from the database): + +```json +[ + { + "dashboard_name": "My Dashboard", + "dashboard_config": { + "layout": [ + { + "i": "w-1728223503577", + "x": 0, + "y": 0, + "w": 3, + "h": 3, + "minW": 2, + "minH": 2, + "static": false + } + ], + "widget_data": [ + { + "i": "w-1728223503577", + "x": 0, + "y": 0, + "w": 3, + "h": 3, + "minW": 2, + "minH": 2, + "static": false, + "thresholds": [ + { "value": 0, "color": "#22c55e" }, + { "value": 20, "color": "#ef4444" } + ], + "preset": { + "id": "11111111-1111-1111-1111-111111111111", + "name": "feed", + "options": [ + { "label": "CEL", "value": "(!deleted && !dismissed)" }, + { + "label": "SQL", + "value": { + "sql": "(deleted=false AND dismissed=false)", + "params": {} + } + } + ], + "created_by": null, + "is_private": false, + "is_noisy": false, + "should_do_noise_now": false, + "alerts_count": 98, + "static": true, + "tags": [] + }, + "name": "Test" + } + ] + } + } +] +``` + +Please read more at https://github.com/react-grid-layout/react-grid-layout for more information on the layout configuration options. 
diff --git a/docs/deployment/provision/overview.mdx b/docs/deployment/provision/overview.mdx new file mode 100644 index 0000000000..be567c4725 --- /dev/null +++ b/docs/deployment/provision/overview.mdx @@ -0,0 +1,39 @@ +--- +title: "Overview" +--- + +Keep supports various deployment and provisioning strategies to accommodate different environments and use cases, from development setups to production deployments. + +### Provisioning Options + +Keep offers three main provisioning options: + +1. [**Provider Provisioning**](/deployment/provision/provider) - Set up and manage data providers with their deduplication rules for Keep. +2. [**Workflow Provisioning**](/deployment/provision/workflow) - Configure and manage workflows within Keep. +3. [**Dashboard Provisioning**](/deployment/provision/dashboard) - Configure and manage dashboards within Keep. + + +Choosing the right provisioning strategy depends on your specific use case, deployment environment, and scalability requirements. You can read more about each provisioning option in their respective sections. + +### How To Configure Provisioning + + + Some provisioning options require additional environment variables. These will + be covered in detail on the specific provisioning pages. + + +Provisioning in Keep is controlled through environment variables and configuration files. The main environment variables for provisioning are: + +| Provisioning Type | Environment Variable | Purpose | +| ---------------------- | ------------------------------ | ------------------------------------------------------------------------- | +| **Provider** | `KEEP_PROVIDERS` | JSON string containing provider configurations with deduplication rules | +| **Workflow** | `KEEP_WORKFLOW` | One workflow to provision right from the env variable. 
| +| **Workflows** | `KEEP_WORKFLOWS_DIRECTORY` | Directory path containing workflow configuration files | +| **Dashboard** | `KEEP_DASHBOARDS` | JSON string containing dashboard configurations | + +Hint: use the script to get 1-liner from the workflow file for KEEP_WORKFLOW: +``` +Use `cat workflow_file.yaml | awk '{printf "%s\\n", $0}' | tr -d '\n'; echo` to get the workflow in 1-string format. +``` + +For more details on each provisioning strategy, including setup instructions and implications, refer to the respective sections. diff --git a/docs/deployment/provision/provider.mdx b/docs/deployment/provision/provider.mdx new file mode 100644 index 0000000000..07ecbb81a6 --- /dev/null +++ b/docs/deployment/provision/provider.mdx @@ -0,0 +1,137 @@ +--- +title: "Providers Provisioning" +--- + +For any questions or issues related to provider provisioning, please join our [Slack](https://slack.keephq.dev) community. + +Provider provisioning in Keep allows you to set up and manage data providers dynamically. This feature enables you to configure various data sources that Keep can interact with, such as monitoring systems, databases, or other services. + +### Configuring Providers + +To provision providers and deduplication rules for them, we can configure via the environment variable. This can be done in two ways: +1. Using `KEEP_PROVIDERS` environment variable which either contains a JSON string or a path to a JSON file that contains the providers configurations. +2. Using `KEEP_PROVIDERS_DIRECTORY` environment variable which contains a path to a directory that contains the providers configurations (configured via YAML files). This is the recommended approach. + + +Keep does not allow to use both `KEEP_PROVIDERS` and `KEEP_PROVIDERS_DIRECTORY` environment variables at the same time. + + + +Keep can automatically install webhooks for providers that support them. This behavior depends on the configuration and the provisioning method used. 
+ + +Please note: Deduplication rules are not mandatory for provider distribution. + +### Providers provisioning using KEEP_PROVIDERS + +Providers provisioning JSON example: +```json +{ + "keepVictoriaMetrics": { + "type": "victoriametrics", + "authentication": { + "VMAlertHost": "http://localhost", + "VMAlertPort": 1234 + }, + "install_webhook": true, + "deduplication_rules": { + "deduplication rule name example 1": { + "description": "deduplication rule name example 1", + "fingerprint_fields": ["fingerprint", "source", "service"], + "full_deduplication": true, + "ignore_fields": ["name", "lastReceived"] + }, + "deduplication rule name example 2": { + "description": "deduplication rule name example 2", + "fingerprint_fields": ["fingerprint", "source", "service"], + "full_deduplication": false, + } + } + }, + "keepClickhouse1": { + "type": "clickhouse", + "authentication": { + "host": "http://localhost", + "port": 1234, + "username": "keep", + "password": "keep", + "database": "keep-db" + } + } +} +``` + +Spin up Keep with this `KEEP_PROVIDERS` value: +```json +# ENV +KEEP_PROVIDERS={"keepVictoriaMetrics":{"type":"victoriametrics","authentication":{"VMAlertHost":"http://localhost","VMAlertPort": 1234},"install_webhook":true},"keepClickhouse1":{"type":"clickhouse","authentication":{"host":"http://localhost","port":"4321","username":"keep","password":"1234","database":"keepdb"}}} +``` + +By default, when provisioning using `KEEP_PROVIDERS`, webhooks are automatically installed for providers that support them unless the `install_webhook` flag is set to `false`. + +### Providers provisioning using KEEP_PROVIDERS_DIRECTORY + +Specify the path to the directory containing the providers configurations: + +```bash +# ENV +KEEP_PROVIDERS_DIRECTORY=/path/to/providers +``` + +The directory should contain YAML files with the providers configurations. 
+ +Example of a provider configuration YAML file: + +```yaml +name: keepVictoriaMetrics +type: victoriametrics +authentication: + VMAlertHost: http://localhost + VMAlertPort: 1234 +install_webhook: false +deduplication_rules: + deduplication_rule_name_example_1: + description: deduplication rule name example 1 + fingerprint_fields: + - fingerprint + - source + - service + full_deduplication: true + ignore_fields: + - name + - lastReceived +``` + +The `install_webhook` field controls whether Keep sets up webhooks automatically for that provider. By default, when provisioning using `KEEP_PROVIDERS_DIRECTORY`, webhook installation is disabled unless explicitly set to `true`. + +### Supported Providers + +Keep supports a wide range of provider types. Each provider type has its own specific configuration requirements. +To see the full list of supported providers and their detailed configuration options, please refer to our comprehensive provider documentation. + + +### Update Provisioned Providers + +#### Using KEEP_PROVIDERS + +Provider configurations can be updated dynamically by changing the `KEEP_PROVIDERS` environment variable. + +On every restart, Keep reads this environment variable and determines which providers need to be added or removed. + +This process allows for flexible management of data sources without requiring manual intervention. By simply updating the `KEEP_PROVIDERS` variable and restarting the application, you can efficiently add new providers, remove existing ones, or modify their configurations. + +The high-level provisioning mechanism: +1. Keep reads the `KEEP_PROVIDERS` value. +2. Keep checks if there are any provisioned providers that are no longer in the `KEEP_PROVIDERS` value, and deletes them. +3. Keep installs all providers from the `KEEP_PROVIDERS` value. + +#### Using KEEP_PROVIDERS_DIRECTORY + +Provider configurations can be updated dynamically by changing the YAML files in the `KEEP_PROVIDERS_DIRECTORY` directory. 
+ +On every restart, Keep reads the YAML files in the `KEEP_PROVIDERS_DIRECTORY` directory and determines which providers need to be added or removed. + +The high-level provisioning mechanism: +1. Keep reads the YAML files in the `KEEP_PROVIDERS_DIRECTORY` directory. +2. Keep checks if there are any provisioned providers that are no longer in the YAML files, and deletes them. +3. Keep installs all providers from the YAML files. diff --git a/docs/deployment/provision/workflow.mdx b/docs/deployment/provision/workflow.mdx new file mode 100644 index 0000000000..134704dc4f --- /dev/null +++ b/docs/deployment/provision/workflow.mdx @@ -0,0 +1,36 @@ +--- +title: "Workflow Provisioning" +--- + +For any questions or issues related to workflow provisioning, please join our [Slack](https://slack.keephq.dev) community. + +Workflow provisioning in Keep allows you to set up and manage workflows dynamically. This feature enables you to configure various automated processes and tasks within your Keep deployment. + +### Configuring Workflows + +To provision workflows, follow these steps: + +1. Set the `KEEP_WORKFLOWS_DIRECTORY` environment variable to the path of your workflow configuration directory. +2. Create workflow configuration files in the specified directory. + +Example directory structure: +``` +/path/to/workflows/ +├── workflow1.yaml +├── workflow2.yaml +└── workflow3.yaml +``` +### Update Provisioned Workflows + +On every restart, Keep reads the `KEEP_WORKFLOWS_DIRECTORY` environment variable and determines which workflows need to be added, removed, or updated. + +This process allows for flexible management of workflows without requiring manual intervention. By simply updating the workflow files in the `KEEP_WORKFLOWS_DIRECTORY` and restarting the application, you can efficiently add new workflows, remove existing ones, or modify their configurations. + +The high-level provisioning mechanism: +1. Keep reads the `KEEP_WORKFLOWS_DIRECTORY` value. +2. 
Keep lists all workflow files under the `KEEP_WORKFLOWS_DIRECTORY` directory. +3. Keep compares the current workflow files with the previously provisioned workflows: + - New workflow files are provisioned. + - Missing workflow files are deprovisioned. + - Updated workflow files are re-provisioned with the new configuration. +4. Keep updates its internal state to reflect the current set of provisioned workflows. diff --git a/docs/deployment/secret-manager.mdx b/docs/deployment/secret-manager.mdx deleted file mode 100644 index 4de17c68a6..0000000000 --- a/docs/deployment/secret-manager.mdx +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "Secret Manager" -sidebarTitle: "Secret Manager" ---- - -## Overview - -Secret Manager selection is crucial for securing your application. Different modes can be set up depending on the deployment type. Our system supports four primary secret manager types. - -## Secret Manager Factory - -The `SecretManagerFactory` is a utility class used to create instances of different types of secret managers. It leverages the Factory design pattern to abstract the creation logic based on the type of secret manager required. The factory supports creating instances of File, GCP, Kubernetes, and Vault Secret Managers. - -The `SECRET_MANAGER_TYPE` environment variable plays a crucial role in the SecretManagerFactory for determining the default type of secret manager to be instantiated when no specific type is provided in the method call. - -**Functionality**: - -**Default Secret Manager**: If the `SECRET_MANAGER_TYPE` environment variable is set, its value dictates the default type of secret manager that the factory will create. -The value of this variable should correspond to one of the types defined in SecretManagerTypes enum (`FILE`, `GCP`, `K8S`, `VAULT`). - -**Example Configuration**: - -Setting `SECRET_MANAGER_TYPE=GCP` in the environment will make the factory create instances of GcpSecretManager by default. 
-If `SECRET_MANAGER_TYPE` is not set or is set to `FILE`, the factory defaults to creating instances of FileSecretManager. -This environment variable provides flexibility and ease of configuration, allowing different secret managers to be used in different environments or scenarios without code changes. - -## File Secert Manager - -The `FileSecretManager` is a concrete implementation of the BaseSecretManager for managing secrets stored in the file system. It uses a specified directory (defaulting to ./) to read, write, and delete secret files. - -Configuration: - -Set the environment variable `SECRET_MANAGER_DIRECTORY` to specify the directory where secrets are stored. If not set, defaults to the current directory (./). - -Usage: - -- Secrets are stored as files in the specified directory. -- Reading a secret involves fetching content from a file. -- Writing a secret creates or updates a file with the given content. -- Deleting a secret removes the corresponding file. - -## Kubernetes Secret Manager - -The `KubernetesSecretManager` interfaces with Kubernetes' native secrets system. It manages secrets within a specified Kubernetes namespace and is designed to operate within a Kubernetes cluster. - -Configuration: - -Set `K8S_NAMESPACE` environment variable to specify the Kubernetes namespace. Defaults to default if not set. Assumes Kubernetes configurations (like service account tokens) are properly set up when running within a cluster. - -Usage: - -- Secrets are stored as Kubernetes Secret objects. -- Provides functionalities to create, retrieve, and delete Kubernetes secrets. -- Handles base64 encoding and decoding as required by Kubernetes. - -## GCP Secret Manager - -The `GcpSecretManager` utilizes Google Cloud's Secret Manager service for secret management. It requires setting up with Google Cloud credentials and a project ID. - -Configuration: - -Ensure the environment variable `GOOGLE_CLOUD_PROJECT` is set with your Google Cloud project ID. 
- -Usage: - -- Secrets are managed using Google Cloud's Secret Manager. -- Supports operations to create, access, and delete secrets in the cloud. -- Integrates with OpenTelemetry for tracing secret management operations. - -## Hashicorp Vault Secret Manager - -The `VaultSecretManager` is tailored for Hashicorp Vault, a tool for managing sensitive data. It supports token-based authentication as well as Kubernetes-based authentication for Vault. - -Configuration: - -- Set `HASHICORP_VAULT_ADDR` to the Vault server address. Defaults to http://localhost:8200. -- Use `HASHICORP_VAULT_TOKEN` for token-based authentication. -- Set `HASHICORP_VAULT_USE_K8S` to True and provide `HASHICORP_VAULT_K8S_ROLE` for Kubernetes-based authentication. - -Usage: - -- Manages secrets in a Hashicorp Vault server. -- Provides methods to write, read, and delete secrets from Vault. -- Supports different Vault authentication methods including static tokens and Kubernetes service account tokens. diff --git a/docs/deployment/secret-store.mdx b/docs/deployment/secret-store.mdx new file mode 100644 index 0000000000..6a4db9fbb1 --- /dev/null +++ b/docs/deployment/secret-store.mdx @@ -0,0 +1,224 @@ +--- +title: "Secret Store" +sidebarTitle: "Secret Store" +--- + +## Overview + + + Secret Manager selection is crucial for securing your application. Different + modes can be set up depending on the deployment type. Our system supports four + primary secret manager types. + + +## Secret Manager Factory + +The `SecretManagerFactory` is a utility class used to create instances of different types of secret managers. It leverages the Factory design pattern to abstract the creation logic based on the type of secret manager required. The factory supports creating instances of File, GCP, Kubernetes, and Vault Secret Managers. 
+ +The `SECRET_MANAGER_TYPE` environment variable plays a crucial role in the SecretManagerFactory for determining the default type of secret manager to be instantiated when no specific type is provided in the method call. + +**Functionality**: + +**Default Secret Manager**: If the `SECRET_MANAGER_TYPE` environment variable is set, its value dictates the default type of secret manager that the factory will create. +The value of this variable should correspond to one of the types defined in SecretManagerTypes enum (`FILE`, `AWS`, `GCP`, `K8S`, `VAULT`, `DB`). + +**Example Configuration**: + +Setting `SECRET_MANAGER_TYPE=GCP` in the environment will make the factory create instances of GcpSecretManager by default. +If `SECRET_MANAGER_TYPE` is not set or is set to `FILE`, the factory defaults to creating instances of FileSecretManager. +This environment variable provides flexibility and ease of configuration, allowing different secret managers to be used in different environments or scenarios without code changes. + +## File Secret Manager + +The `FileSecretManager` is a concrete implementation of the BaseSecretManager for managing secrets stored in the file system. It uses a specified directory (defaulting to ./) to read, write, and delete secret files. + +Configuration: + +Set the environment variable `SECRET_MANAGER_DIRECTORY` to specify the directory where secrets are stored. If not set, defaults to the current directory (./). + +Usage: + +- Secrets are stored as files in the specified directory. +- Reading a secret involves fetching content from a file. +- Writing a secret creates or updates a file with the given content. +- Deleting a secret removes the corresponding file. + +## AWS Secret Manager + +The `AwsSecretManager` integrates with Amazon Web Services' Secrets Manager service for secure secret management. It provides a robust solution for storing and managing secrets in AWS environments. 
+ +Configuration: + +Required environment variables: + +- `AWS_REGION`: The AWS region where your secrets are stored +- For local development: + - `AWS_ACCESS_KEY_ID`: Your AWS access key + - `AWS_SECRET_ACCESS_KEY`: Your AWS secret access key + Optional: +- `AWS_KMS_KEY_ID`: The KMS key ID to use for encrypting secrets +- `AWS_SECRET_MANAGER_TAGS`: Comma-separated list of tags to add to the secret in AWS Secrets Manager, e.g. `key=value,key2=value2` +- `AWS_SECRET_ROTATION_ENABLED`: Set to `true` to enable automatic rotation of secrets (default: `false`) +- `AWS_SECRET_ROTATION_DAYS`: Number of days between automatic rotations (default: `30`) +- `AWS_SECRET_ROTATION_LAMBDA_ARN`: ARN of the Lambda function to use for secret rotation, required if rotation is enabled + +Usage: + +- Manages secrets using AWS Secrets Manager service +- Supports creating, updating, reading, and deleting secrets +- Can automatically configure secret rotation policies when creating new secrets + +### AWS Secret Rotation + +Secret rotation is a security best practice that automatically updates secrets at regular intervals. When enabled, Keep will configure newly created secrets with a rotation schedule. + +To use secret rotation: + +1. Create a Lambda function for rotating your secrets (AWS provides blueprints for common rotation scenarios) +2. Set `AWS_SECRET_ROTATION_ENABLED=true` in your environment +3. Set `AWS_SECRET_ROTATION_LAMBDA_ARN` to the ARN of your rotation Lambda function +4. Optionally set `AWS_SECRET_ROTATION_DAYS` to customize the rotation interval + +Example Lambda ARN format: `arn:aws:lambda:region:account-id:function:function-name` + +Note: Different secret types (database credentials, API keys, etc.) require different rotation logic. Make sure your Lambda function is appropriate for the type of secrets you're storing. + +## Kubernetes Secret Manager + +### Overview + +The `KubernetesSecretManager` interfaces with Kubernetes' native secrets system. 
+ +It manages secrets within a specified Kubernetes namespace and is designed to operate within a Kubernetes cluster. + +### Configuration + +- `SECRET_MANAGER_TYPE=k8s` +- `K8S_NAMESPACE=keep` - environment variable to specify the Kubernetes namespace. Defaults to `.metadata.namespace` if not set. Assumes Kubernetes configurations (like service account tokens) are properly set up when running within a cluster. +- `K8S_VERIFY_SSL_CERT=true` - environment variable to specify whether to verify the SSL certificate of the Kubernetes API. Defaults to `true`. + +Usage: + +- Secrets are stored as Kubernetes Secret objects. +- Provides functionalities to create, retrieve, and delete Kubernetes secrets. +- Handles base64 encoding and decoding as required by Kubernetes. + +### Environment Variables From Secrets + +The Kubernetes Secret Manager integration allows Keep to fetch environment variables from Kubernetes Secrets. + +For sensitive environment variables, such as `DATABASE_CONNECTION_STRING`, it is recommended to store as a secret: + +#### Creating Database Connection Secret + +```bash +# Create the base64 encoded string without newline +CONNECTION_STRING_B64=$(echo -n "mysql+pymysql://user:password@host:3306/dbname" | base64) + +# Create the Kubernetes secret +kubectl create secret generic keep-db-secret \ + --namespace=keep \ + --from-literal=connection_string=$(echo -n "mysql+pymysql://user:password@host:3306/dbname" | base64) + +# Or using a YAML file: +cat <If you are using Keep and have performance issues, we will be more than happy to help you. Just join our [slack](https://slack.keepqh.dev) and shoot a message on the **#help** channel. + +## Overview + +Spec and stress testing are crucial to ensuring the robust performance and scalability of Keep. 
+This documentation outlines the key areas of focus for testing Keep under different load conditions, considering both the simplicity of setup for smaller environments and the scalability mechanisms for larger deployments. + +Keep was initially designed to be user-friendly for setups handling less than 10,000 alerts. However, as alert volumes increase, users can leverage advanced features such as Elasticsearch for document storage and Redis + ARQ for queue-based alert ingestion. While these advanced configurations are not fully documented here, they are supported and can be discussed further in our Slack community. + +## How To Reproduce + +To reproduce the stress testing scenarios mentioned above, please refer to the [STRESS.md](https://github.com/keephq/keep/blob/main/STRESS.md) file in Keep's repository. This document provides step-by-step instructions on how to set up, run, and measure the performance of Keep under different load conditions. + +## Performance Testing + +### Factors Affecting Specifications + +The primary parameters that affect the specification requirements for Keep are: +1. **Alerts Volume**: The rate at which alerts are ingested into the system. +2. **Total Alerts**: The cumulative number of alerts stored in the system. +3. **Number of Workflows**: How many automation run as a result of alert. + +### Main Components: +- **Keep Backend** - API and business logic. A container that serves FastAPI on top of gunicorn. +- **Keep Frontend** - Web app. A container that serves the react app. +- **Database** - Stores the alerts and any other operational data. +- **Elasticsearch** (opt out by default) - Stores alerts as document for better search performance. +- **Redis** (opt out by default) - Used, together with ARQ, as an alerts queue. + +### Testing Scenarios: + +- **Low Volume (< 10,000 total alerts, hundreds of alerts per day)**: + - **Setup**: Use a standard relational database (e.g., MySQL, PostgreSQL) with default configurations. 
+ - **Expectations**: Keep should handle queries and alert ingestion with minimal resource usage. + +- **Medium Volume (10,000 - 100,000 total alerts, thousands of alerts per day)**: + - **Setup**: Scale the database to larger instances or clusters. Adjust best practices to the DB (e.g. increasing innodb_buffer_pool_size) + - **Expectations**: CPU and RAM usage should increase proportionally but remain within acceptable limits. + +3. **High Volume (100,000 - 1,000,000 total alerts, >five thousands of alerts per day)**: + - **Setup**: Deploy Keep with Elasticsearch for storing alerts as documents. + - **Expectations**: The system should maintain performance levels despite the large alert volume, with increased resource usage managed through scaling strategies. +4. **Very High Volume (> 1,000,000 total alerts, tens of thousands of alerts per day)**: + - **Setup**: Deploy Keep with Elasticsearch for storing alerts as documents. + - **Setup #2**: Deploy Keep with Redis and with ARQ to use Redis as a queue. 
+ +## Recommended Specifications by Alert Volume + +| **Number of Alerts** | **Keep Backend** | **Keep Database** | **Redis** | **Elasticsearch** | +|------------------------|------------------------------------------------|-------------------------------------------------|------------------------------------------------|------------------------------------------------| +| **< 10,000** | 1 vCPUs, 2GB RAM | 2 vCPUs, 8GB RAM | Not required | Not required | +| **10,000 - 100,000** | 4 vCPUs, 8GB RAM | 8 vCPUs, 32GB RAM, optimized indexing | Not required | Not required | +| **100,000 - 500,000** | 8 vCPUs, 16GB RAM | 8 vCPUs, 32GB RAM, advanced indexing | 4 vCPUs, 8GB RAM | 8 vCPUs, 32GB RAM, 2-3 nodes | +| **> 500,000** | 8 vCPUs, 16GB RAM | 8 vCPUs, 32GB RAM, advanced indexing, sharding| 4 vCPUs, 8GB RAM | 8 vCPUs, 32GB RAM, 2-3 nodes | + +## Performance by Operation Type, Load, and Specification + +| **Operation Type** | **Load** | **Specification** | **Execution Time** | +|-----------------------|----------------------------|------------------------------|-----------------------------------| +| Digest Alert | 100 alerts per minute | 4 vCPUs, 8GB RAM | ~0.5 seconds | +| Digest Alert | 500 alerts per minute | 8 vCPUs, 16GB RAM | ~1 second | +| Digest Alert | 1,000 alerts per minute | 16 vCPUs, 32GB RAM | ~1.5 seconds | +| Run Workflow | 10 workflows per minute | 4 vCPUs, 8GB RAM | ~1 second | +| Run Workflow | 50 workflows per minute | 8 vCPUs, 16GB RAM | ~2 seconds | +| Run Workflow | 100 workflows per minute | 16 vCPUs, 32GB RAM | ~3 seconds | +| Ingest via Queue | 100 alerts per minute | 4 vCPUs, 8GB RAM, Redis | ~0.3 seconds | +| Ingest via Queue | 500 alerts per minute | 8 vCPUs, 16GB RAM, Redis | ~0.8 seconds | +| Ingest via Queue | 1,000 alerts per minute | 16 vCPUs, 32GB RAM, Redis | ~1.2 seconds | + +### Table Explanation: +- **Operation Type**: The specific operation being tested (e.g., digesting alerts, running workflows). 
+- **Load**: The number of operations per minute being processed (e.g., number of alerts per minute). +- **Specification**: The CPU, RAM, and additional services used for the operation. +- **Execution Time**: Approximate time taken to complete the operation under the given load and specification. + + +## Fine Tuning + +As any deployment has its own characteristics, such as the balance between volume vs. total count of alerts or volume vs. number of workflows, Keep can be fine-tuned with the following parameters: + +1. **Number of Workers**: Adjust the number of Gunicorn workers to handle API requests more efficiently. You can also start additional API servers to distribute the load. +2. **Distinguish Between API Server Workers and Digesting Alerts Workers**: Separate the workers dedicated to handling API requests from those responsible for digesting alerts, ensuring that each set of tasks is optimized according to its specific needs. +3. **Add More RAM to the Database**: Increasing the RAM allocated to your database can help manage larger datasets and improve query performance, particularly when dealing with high volumes of alerts. +4. **Optimize Database Configuration**: Keep was mainly tested on MySQL and PostgreSQL. Different database may have different fine tuning mechanisms. +5. **Horizontal Scaling**: Consider deploying additional instances of the API and database services to distribute the load more effectively. + + + +## FAQ + +### 1. How do I estimate the spec I need for Keep? +To estimate the specifications required for Keep, consider both the number of alerts per minute and the total number of alerts you expect to handle. Refer to the **Recommended Specifications by Alert Volume** table above to match your expected load with the appropriate resources. + +### 2. How do I know if I need Elasticsearch? 
+Elasticsearch is typically needed when you are dealing with more than 50,000 total alerts or if you require advanced search and query capabilities that are not efficiently handled by a traditional relational database. If your system’s performance degrades significantly as alert volume increases, it may be time to consider Elasticsearch. + +### 3. How do I know if I need Redis? +Redis is recommended when your alert ingestion rate exceeds 1,000 alerts per minute or when you notice that the API is becoming a bottleneck due to high ingestion rates. Redis, combined with ARQ (Asynchronous Redis Queue), can help manage and distribute the load more effectively. + +### 4. What should I do if Keep's performance is still inadequate? +If you have scaled according to the recommendations and are still facing performance issues, consider: +- **Optimizing your database configuration**: Indexing, sharding, and query optimization can make a significant difference. +- **Horizontal scaling**: Distribute the load across multiple instances of the API and database services. +- **Reach out to our Slack community**: For personalized support, reach out to us on Slack, and we’ll help you troubleshoot and optimize your Keep deployment. + +For any additional questions or tailored advice, feel free to join our Slack community where our team and other users are available to assist you. 
diff --git a/docs/development/external-url.mdx b/docs/development/external-url.mdx index 70528f532e..e73ed2b99d 100644 --- a/docs/development/external-url.mdx +++ b/docs/development/external-url.mdx @@ -1,6 +1,6 @@ --- -title: "Keep with Internet URL" -sidebarTitle: "Keep with Internet URL" +title: "Keep with an external URL" +sidebarTitle: "Keep with an external URL" --- ## Introduction diff --git a/docs/development/getting-started.mdx b/docs/development/getting-started.mdx index 998f339cf0..ab3372db73 100644 --- a/docs/development/getting-started.mdx +++ b/docs/development/getting-started.mdx @@ -13,9 +13,54 @@ git clone https://github.com/keephq/keep.git && cd keep Next, run ``` -docker-compose -f docker-compose.dev.yml up +docker compose -f docker-compose.dev.yml up ``` +### Install Keep CLI + +First, clone Keep repository: + +```shell +git clone https://github.com/keephq/keep.git && cd keep +``` + +Install Keep CLI + +```shell +poetry install +``` + +To access the Keep CLI activate the environment, and access from shell. + +```shell +poetry shell +``` + +From now on, Keep should be installed locally and accessible from your CLI, test it by executing: + +``` +keep version +``` + +## Enable Auto Completion + +**Keep's CLI supports shell auto-completion, which can make your life a whole lot easier 😌** + +If you're using zsh + +```shell title=~/.zshrc +eval "$(_KEEP_COMPLETE=zsh_source keep)" +``` + +If you're using bash + +```bash title=~/.bashrc +eval "$(_KEEP_COMPLETE=bash_source keep)" +``` + +> Using eval means that the command is invoked and evaluated every time a shell is started, which can delay shell responsiveness. To speed it up, write the generated script to a file, then source that. + + ### Testing Run unittests: @@ -33,13 +78,13 @@ poetry run coverage run --branch -m pytest -s tests/e2e_tests/ Migrations are automatically executed on a server startup. 
To create a migration: ```bash -cd keep && alembic revision --autogenerate -m "Your message" +alembic -c keep/alembic.ini revision --autogenerate -m "Your message" ``` Hint: make sure your models are imported at `./api/models/db/migrations/env.py` for autogenerator to pick them up. -## VSCode -You can run Keep from your VSCode (after cloning the repo) by adding this configurations to your `.vscode/launch.json`: +## VS Code (or Cursor) +Run Keep from your VS Code (or Cursor) after cloning the repo by adding this configurations to your `.vscode/launch.json`: ```json { @@ -52,6 +97,7 @@ You can run Keep from your VSCode (after cloning the repo) by adding this config "program": "keep/cli/cli.py", "console": "integratedTerminal", "justMyCode": false, + "python": "venv/bin/python", "args": ["--json", "api","--multi-tenant"], "env": { "PYDEVD_DISABLE_FILE_VALIDATION": "1", @@ -72,6 +118,7 @@ You can run Keep from your VSCode (after cloning the repo) by adding this config "program": "scripts/simulate_alerts.py", "console": "integratedTerminal", "justMyCode": false, + "python": "venv/bin/python", "env": { "PYDEVD_DISABLE_FILE_VALIDATION": "1", "PYTHONPATH": "${workspaceFolder}/", @@ -92,9 +139,17 @@ You can run Keep from your VSCode (after cloning the repo) by adding this config Install dependencies: ``` +python3.11 -m venv venv; +source venv/bin/activate; pip install poetry; poetry install; -cd keep-ui && npm i; +cd keep-ui && npm i && cd ..; +``` + +Set frontend envs: +``` +cp keep-ui/.env.local.example keep-ui/.env.local; +echo "\n\n\n\nNEXTAUTH_SECRET="$(openssl rand -hex 32) >> keep-ui/.env.local; ``` Launch Pusher ([soketi](https://soketi.app/)) container in parallel: @@ -103,21 +158,21 @@ docker run -d -p 6001:6001 -p 9601:9601 -e SOKETI_USER_AUTHENTICATION_TIMEOUT=30 ``` -## VSCode + Docker -For this guide to work, the VSCode Docker extension is required. 
+## VS Code (or Cursor) + Docker +For this guide to work, the [VS Code Docker](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.vscode-docker) extension is required. In air-gapped environments, you might consider building the container on an internet-connected computer, exporting the image using docker save, transferring it with docker load in the air-gapped environment, and then using the run configuration. -In cases where you want to develop Keep but are unable to run it directly on your local laptop (e.g., with Windows), or if you lack access to all of its dependencies (e.g., in air-gapped environments), you can still accomplish this using VSCode and Docker. +In cases where you want to develop Keep but are unable to run it directly on your local laptop (e.g., with Windows), or if you lack access to all of its dependencies (e.g., in air-gapped environments), you can still accomplish this using VS Code (or Cursor) and Docker. To achieve this, follow these steps: -1. Clone Keep and open it with VSCode +1. Clone Keep and open it with VS Code (or Cursor) 2. Create a tasks.json file to build and run the Keep API and Keep UI containers. 3. Create a launch.json configuration to start the containers and attach a debugger to them. 4. Profit. -### Clone Keep and open it with VSCode +### Clone Keep and open it with VS Code (or Cursor) ``` git clone https://github.com/keephq/keep.git && cd keep code . @@ -130,14 +185,14 @@ code . { "version": "2.0.0", "tasks": [ - # The API and UI containers needs to be in the same docker network + // The API and UI containers needs to be in the same docker network { "label": "docker-create-network", "type": "shell", "command": "docker network create keep-network || true", "problemMatcher": [] }, - # Build the api container + // Build the api container { "label": "docker-build-api-dev", "type": "docker-build", @@ -147,7 +202,7 @@ code . 
"tag": "keep-api-dev:latest" } }, - # Run the api container + // Run the api container { "label": "docker-run-api-dev", "type": "docker-run", @@ -174,7 +229,7 @@ code . "DEBUG": "1", "SECRET_MANAGER_TYPE": "FILE", "USE_NGROK": "false", - "AUTH_TYPE": "SINGLE_TENANT" + "AUTH_TYPE": "DB" }, "volumes": [ { @@ -184,7 +239,7 @@ code . ] } }, - # Build the UI container + // Build the UI container { "label": "docker-build-ui", "type": "docker-build", @@ -194,7 +249,7 @@ code . "tag": "keep-ui-dev:latest" } }, - # Run the UI container + // Run the UI container { "type": "docker-run", "label": "docker-run-ui", @@ -209,8 +264,8 @@ code . // Uncomment for fully debug // "DEBUG": "*", "NODE_ENV": "development", - "API_URL": "http://keep-api:8080" - "AUTH_TYPE": "SINGLE_TENANT", + "API_URL": "http://keep-api:8080", + "AUTH_TYPE": "DB", }, "volumes": [ { @@ -281,7 +336,7 @@ code . "DEBUG": "1", "SECRET_MANAGER_TYPE": "FILE", "USE_NGROK": "false", - "AUTH_TYPE": "SINGLE_TENANT" + "AUTH_TYPE": "DB" }, "volumes": [ { @@ -307,7 +362,7 @@ code . // "DEBUG": "*", "NODE_ENV": "development", "API_URL": "http://keep-api:8080", - "AUTH_TYPE": "SINGLE_TENANT" + "AUTH_TYPE": "DB" }, "volumes": [ { diff --git a/docs/development/installation.mdx b/docs/development/installation.mdx deleted file mode 100644 index ce0184b4e6..0000000000 --- a/docs/development/installation.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "Deployment" -sidebarTitle: "Overview" -description: "After writing some alerts with Keep, you may now want to use Keep in production! For that, you can easily deploy Keep on an environment other than your local station." ---- - -Keep currently supports [Docker](#docker) and [Render](#render). - - - Want to deploy Keep on a specific platform that is not yet supported? [Just - open an - issue](https://github.com/keephq/keep/issues/new?assignees=&labels=&template=feature_request.md&title=feature:%20new%20deployment%20option) - and we will get to it ASAP! 
- - -## E2E - -### Docker - -## CLI - -Run _Keep_ alerting engine (The CLI) - -### Docker - -Configure the Slack provider (See "[Run locally](https://github.com/keephq/keep#get-a-slack-incoming-webhook-using-this-tutorial-and-use-keep-to-configure-it)" on how to obtain the webhook URL) - -```bash -docker run -v ${PWD}:/app -it us-central1-docker.pkg.dev/keephq/keep/keep-cli config provider --provider-type slack --provider-id slack-demo -``` - -You should now have a providers.yaml file created locally - -Run Keep and execute our example "Paper DB has insufficient disk space" alert - -```bash -docker run -v ${PWD}:/app -it us-central1-docker.pkg.dev/keephq/keep/keep-cli -j run --alert-url https://raw.githubusercontent.com/keephq/keep/main/examples/alerts/db_disk_space.yml -``` - -### Render - -Click the Deploy to Render button to deploy Keep as a background worker running in [Render](https://www.render.com) - -[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/keephq/keep) - -To run Keep and execute our example "Paper DB has insufficient disk space" alert, you will need to configure you Slack provider. -When clicking the Deploy to Render button, you will be asked to provide the `KEEP_PROVIDER_SLACK_DEMO` environment variable, this is the expected format: - -```json -{ "authentication": { "webhook_url": "https://hooks.slack.com/services/..." } } -``` - -\*\* `KEEP_PROVIDER_PROVIDER_ID` is the way you can configure providers using environment variables
-\*\* Refer to [Run locally](https://github.com/keephq/keep#get-a-slack-incoming-webhook-using-this-tutorial-and-use-keep-to-configure-it) on how to obtain a Slack webhook URL or on how to obtain Keep's webhook. diff --git a/docs/development/overview.mdx b/docs/development/overview.mdx deleted file mode 100644 index fe47fb7904..0000000000 --- a/docs/development/overview.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "Quickstart" ---- -## Run Locally - -### Docker-compose (Option 1) - -Run _Keep_ full stack (Console & API) - -```bash -docker-compose up -``` -Or - -```bash -docker-compose -f docker-compose.dev.yml up --build -``` -If you want to run *Keep* in [development mode](https://development-mode-url) (code compiles on changes) - - - **OpenAPI Integration** - - Please note that some features used by Keep requires OpenAI API key to work. Export `OPENAI_API_KEY=sk-YOUR_API_KEY` before running docker-compose to make them available. - - For example: - - ```bash - OPENAI_API_KEY=sk-YOUR_API_KEY docker-compose up - ``` - - -### Clone and install (Option 2) -Try our first mock alert and get it up and running in - -**First, clone Keep repository:** - -```shell -git clone https://github.com/keephq/keep.git && cd keep -``` - -**Install Keep CLI** - -```shell -pip install . -``` -or - -```shell -poetry install -``` - -**From now on, Keep should be installed locally and accessible from your CLI, test it by executing:** - -``` -keep version -``` - -**Get a Slack Incoming Webhook using [this tutorial](https://api.slack.com/messaging/webhooks) and use use Keep to configure it** - -``` -keep config provider --provider-type slack --provider-id slack-demo -``` -Paste the Slack Incoming Webhook URL (e.g. https://hooks.slack.com/services/...) 
and you're good to go 👌 - -**Let's now execute our example "Paper DB has insufficient disk space" alert** - -```bash -keep run --alerts-file examples/alerts/db_disk_space.yml -``` - -**Congrats 🥳 You should have received your first "Dunder Mifflin Paper Company" alert in Slack by now.** - -Wanna have your alerts up and running in production? Go through our more detailed [Deployment Guide](https://keephq.wiki/deployment). - -## Enable Auto Completion - -**Keep's CLI supports shell auto-completion, which can make your life a whole lot easier 😌** - -If you're using zsh - -```shell title=~/.zshrc -eval "$(_KEEP_COMPLETE=zsh_source keep)" -``` - -If you're using bash - -```bash title=~/.bashrc -eval "$(_KEEP_COMPLETE=bash_source keep)" -``` - -> Using eval means that the command is invoked and evaluated every time a shell is started, which can delay shell responsiveness. To speed it up, write the generated script to a file, then source that. diff --git a/docs/generate_readme_from_openapijson.sh b/docs/generate_readme_from_openapijson.sh deleted file mode 100755 index 439dc8f6e9..0000000000 --- a/docs/generate_readme_from_openapijson.sh +++ /dev/null @@ -1,7 +0,0 @@ - -#!/bin/bash - -# Before running this script, make sure you have update the openapi.json from the backend, (/docs route) - -python3 openapi_converter.py --source ./openapi.json --dest ./openapi.json -npx @mintlify/scraping@latest openapi-file ./openapi.json -o ./api-ref \ No newline at end of file diff --git a/docs/images/ai-correlation-1.png b/docs/images/ai-correlation-1.png new file mode 100644 index 0000000000..61374611ae Binary files /dev/null and b/docs/images/ai-correlation-1.png differ diff --git a/docs/images/ai-correlation-2.png b/docs/images/ai-correlation-2.png new file mode 100644 index 0000000000..08aea5e9db Binary files /dev/null and b/docs/images/ai-correlation-2.png differ diff --git a/docs/images/ai-semi-automatic-correlation.png b/docs/images/ai-semi-automatic-correlation.png new file 
mode 100644 index 0000000000..07490c9654 Binary files /dev/null and b/docs/images/ai-semi-automatic-correlation.png differ diff --git a/docs/images/ai-workflow-assistant.png b/docs/images/ai-workflow-assistant.png new file mode 100644 index 0000000000..4b6aa6cb0f Binary files /dev/null and b/docs/images/ai-workflow-assistant.png differ diff --git a/docs/images/airflow_1.png b/docs/images/airflow_1.png new file mode 100644 index 0000000000..6ea1b98741 Binary files /dev/null and b/docs/images/airflow_1.png differ diff --git a/docs/images/airflow_2.png b/docs/images/airflow_2.png new file mode 100644 index 0000000000..3513d13d55 Binary files /dev/null and b/docs/images/airflow_2.png differ diff --git a/docs/images/alert_table_1.png b/docs/images/alert_table_1.png new file mode 100644 index 0000000000..541b4ab805 Binary files /dev/null and b/docs/images/alert_table_1.png differ diff --git a/docs/images/alert_table_menu_1.png b/docs/images/alert_table_menu_1.png new file mode 100644 index 0000000000..2a1cc3c9de Binary files /dev/null and b/docs/images/alert_table_menu_1.png differ diff --git a/docs/images/alert_table_sidebar.png b/docs/images/alert_table_sidebar.png new file mode 100644 index 0000000000..bd57d1acc8 Binary files /dev/null and b/docs/images/alert_table_sidebar.png differ diff --git a/docs/images/alert_table_table_1.png b/docs/images/alert_table_table_1.png new file mode 100644 index 0000000000..6d78ad12cc Binary files /dev/null and b/docs/images/alert_table_table_1.png differ diff --git a/docs/images/alert_table_table_2.png b/docs/images/alert_table_table_2.png new file mode 100644 index 0000000000..949d018170 Binary files /dev/null and b/docs/images/alert_table_table_2.png differ diff --git a/docs/images/alert_table_table_3.png b/docs/images/alert_table_table_3.png new file mode 100644 index 0000000000..1897b4b226 Binary files /dev/null and b/docs/images/alert_table_table_3.png differ diff --git a/docs/images/alert_table_table_4.png 
b/docs/images/alert_table_table_4.png new file mode 100644 index 0000000000..bc5fda08eb Binary files /dev/null and b/docs/images/alert_table_table_4.png differ diff --git a/docs/images/alert_table_table_5.png b/docs/images/alert_table_table_5.png new file mode 100644 index 0000000000..1704dab9dc Binary files /dev/null and b/docs/images/alert_table_table_5.png differ diff --git a/docs/images/alert_table_table_6.png b/docs/images/alert_table_table_6.png new file mode 100644 index 0000000000..688d8ef13e Binary files /dev/null and b/docs/images/alert_table_table_6.png differ diff --git a/docs/images/alert_table_table_7.png b/docs/images/alert_table_table_7.png new file mode 100644 index 0000000000..1310538696 Binary files /dev/null and b/docs/images/alert_table_table_7.png differ diff --git a/docs/images/alert_table_table_8.png b/docs/images/alert_table_table_8.png new file mode 100644 index 0000000000..6fe36f478c Binary files /dev/null and b/docs/images/alert_table_table_8.png differ diff --git a/docs/images/alert_table_table_9.png b/docs/images/alert_table_table_9.png new file mode 100644 index 0000000000..6f4a6d5498 Binary files /dev/null and b/docs/images/alert_table_table_9.png differ diff --git a/docs/images/alert_table_table_sort.gif b/docs/images/alert_table_table_sort.gif new file mode 100644 index 0000000000..eb3a21127d Binary files /dev/null and b/docs/images/alert_table_table_sort.gif differ diff --git a/docs/images/alerthistory.png b/docs/images/alerthistory.png deleted file mode 100644 index 17fff3a18b..0000000000 Binary files a/docs/images/alerthistory.png and /dev/null differ diff --git a/docs/images/alertspage.png b/docs/images/alertspage.png deleted file mode 100644 index b2a12b0cf3..0000000000 Binary files a/docs/images/alertspage.png and /dev/null differ diff --git a/docs/images/appdynamics_1.png b/docs/images/appdynamics_1.png new file mode 100644 index 0000000000..ab13ca82cb Binary files /dev/null and b/docs/images/appdynamics_1.png differ diff 
--git a/docs/images/appdynamics_10.png b/docs/images/appdynamics_10.png new file mode 100644 index 0000000000..40c51be108 Binary files /dev/null and b/docs/images/appdynamics_10.png differ diff --git a/docs/images/appdynamics_2.png b/docs/images/appdynamics_2.png new file mode 100644 index 0000000000..fbb1c9f9c0 Binary files /dev/null and b/docs/images/appdynamics_2.png differ diff --git a/docs/images/appdynamics_3.png b/docs/images/appdynamics_3.png new file mode 100644 index 0000000000..5ee96bdd4b Binary files /dev/null and b/docs/images/appdynamics_3.png differ diff --git a/docs/images/appdynamics_4.png b/docs/images/appdynamics_4.png new file mode 100644 index 0000000000..5acd5be834 Binary files /dev/null and b/docs/images/appdynamics_4.png differ diff --git a/docs/images/appdynamics_5.png b/docs/images/appdynamics_5.png new file mode 100644 index 0000000000..04aeb2449e Binary files /dev/null and b/docs/images/appdynamics_5.png differ diff --git a/docs/images/appdynamics_6.png b/docs/images/appdynamics_6.png new file mode 100644 index 0000000000..3b5a67ed68 Binary files /dev/null and b/docs/images/appdynamics_6.png differ diff --git a/docs/images/appdynamics_7.png b/docs/images/appdynamics_7.png new file mode 100644 index 0000000000..3bc5cfb6b8 Binary files /dev/null and b/docs/images/appdynamics_7.png differ diff --git a/docs/images/appdynamics_8.png b/docs/images/appdynamics_8.png new file mode 100644 index 0000000000..be94a6b881 Binary files /dev/null and b/docs/images/appdynamics_8.png differ diff --git a/docs/images/appdynamics_9.png b/docs/images/appdynamics_9.png new file mode 100644 index 0000000000..ed91c73fe6 Binary files /dev/null and b/docs/images/appdynamics_9.png differ diff --git a/docs/images/asana-provider_1.png b/docs/images/asana-provider_1.png new file mode 100644 index 0000000000..8143b2516d Binary files /dev/null and b/docs/images/asana-provider_1.png differ diff --git a/docs/images/asana-provider_2.png b/docs/images/asana-provider_2.png 
new file mode 100644 index 0000000000..82052f03ef Binary files /dev/null and b/docs/images/asana-provider_2.png differ diff --git a/docs/images/asana-provider_3.png b/docs/images/asana-provider_3.png new file mode 100644 index 0000000000..a709c32a5a Binary files /dev/null and b/docs/images/asana-provider_3.png differ diff --git a/docs/images/multitenant.png b/docs/images/auth0auth.png similarity index 100% rename from docs/images/multitenant.png rename to docs/images/auth0auth.png diff --git a/docs/images/axiom-provider-1.png b/docs/images/axiom-provider-1.png new file mode 100644 index 0000000000..399b4ae79c Binary files /dev/null and b/docs/images/axiom-provider-1.png differ diff --git a/docs/images/axiom-provider-2.png b/docs/images/axiom-provider-2.png new file mode 100644 index 0000000000..97c1232816 Binary files /dev/null and b/docs/images/axiom-provider-2.png differ diff --git a/docs/images/axiom-provider-3.png b/docs/images/axiom-provider-3.png new file mode 100644 index 0000000000..ad2d85c463 Binary files /dev/null and b/docs/images/axiom-provider-3.png differ diff --git a/docs/images/axiom-provider-4.png b/docs/images/axiom-provider-4.png new file mode 100644 index 0000000000..1064852d45 Binary files /dev/null and b/docs/images/axiom-provider-4.png differ diff --git a/docs/images/axiom-provider-5.png b/docs/images/axiom-provider-5.png new file mode 100644 index 0000000000..6cc73ace47 Binary files /dev/null and b/docs/images/axiom-provider-5.png differ diff --git a/docs/images/axiom-provider-6.png b/docs/images/axiom-provider-6.png new file mode 100644 index 0000000000..c037afbe16 Binary files /dev/null and b/docs/images/axiom-provider-6.png differ diff --git a/docs/images/axiom-provider-7.png b/docs/images/axiom-provider-7.png new file mode 100644 index 0000000000..6e0982aa55 Binary files /dev/null and b/docs/images/axiom-provider-7.png differ diff --git a/docs/images/azuread_1.png b/docs/images/azuread_1.png new file mode 100644 index 
0000000000..6b83bc8cf0 Binary files /dev/null and b/docs/images/azuread_1.png differ diff --git a/docs/images/azuread_10.png b/docs/images/azuread_10.png new file mode 100644 index 0000000000..eb39b08d74 Binary files /dev/null and b/docs/images/azuread_10.png differ diff --git a/docs/images/azuread_11.png b/docs/images/azuread_11.png new file mode 100644 index 0000000000..964caf880d Binary files /dev/null and b/docs/images/azuread_11.png differ diff --git a/docs/images/azuread_12.png b/docs/images/azuread_12.png new file mode 100644 index 0000000000..dc6758ff7a Binary files /dev/null and b/docs/images/azuread_12.png differ diff --git a/docs/images/azuread_13.png b/docs/images/azuread_13.png new file mode 100644 index 0000000000..34ba721670 Binary files /dev/null and b/docs/images/azuread_13.png differ diff --git a/docs/images/azuread_14.png b/docs/images/azuread_14.png new file mode 100644 index 0000000000..3d5d0b384f Binary files /dev/null and b/docs/images/azuread_14.png differ diff --git a/docs/images/azuread_15.png b/docs/images/azuread_15.png new file mode 100644 index 0000000000..c27483370c Binary files /dev/null and b/docs/images/azuread_15.png differ diff --git a/docs/images/azuread_16.png b/docs/images/azuread_16.png new file mode 100644 index 0000000000..2f7dd8977f Binary files /dev/null and b/docs/images/azuread_16.png differ diff --git a/docs/images/azuread_2.png b/docs/images/azuread_2.png new file mode 100644 index 0000000000..c573680299 Binary files /dev/null and b/docs/images/azuread_2.png differ diff --git a/docs/images/azuread_3.png b/docs/images/azuread_3.png new file mode 100644 index 0000000000..4e91c72630 Binary files /dev/null and b/docs/images/azuread_3.png differ diff --git a/docs/images/azuread_4.png b/docs/images/azuread_4.png new file mode 100644 index 0000000000..8d995462b0 Binary files /dev/null and b/docs/images/azuread_4.png differ diff --git a/docs/images/azuread_5.png b/docs/images/azuread_5.png new file mode 100644 index 
0000000000..2096904219 Binary files /dev/null and b/docs/images/azuread_5.png differ diff --git a/docs/images/azuread_6.png b/docs/images/azuread_6.png new file mode 100644 index 0000000000..e5b44ffbc0 Binary files /dev/null and b/docs/images/azuread_6.png differ diff --git a/docs/images/azuread_7.png b/docs/images/azuread_7.png new file mode 100644 index 0000000000..1ea01ed689 Binary files /dev/null and b/docs/images/azuread_7.png differ diff --git a/docs/images/azuread_8.png b/docs/images/azuread_8.png new file mode 100644 index 0000000000..6700a9b019 Binary files /dev/null and b/docs/images/azuread_8.png differ diff --git a/docs/images/azuread_9.png b/docs/images/azuread_9.png new file mode 100644 index 0000000000..bb497af697 Binary files /dev/null and b/docs/images/azuread_9.png differ diff --git a/docs/images/chart_example_1.webp b/docs/images/chart_example_1.webp new file mode 100644 index 0000000000..48692f0ac2 Binary files /dev/null and b/docs/images/chart_example_1.webp differ diff --git a/docs/images/chart_example_2.webp b/docs/images/chart_example_2.webp new file mode 100644 index 0000000000..83d0105e67 Binary files /dev/null and b/docs/images/chart_example_2.webp differ diff --git a/docs/images/checkly-provider_1.png b/docs/images/checkly-provider_1.png new file mode 100644 index 0000000000..f797b2e039 Binary files /dev/null and b/docs/images/checkly-provider_1.png differ diff --git a/docs/images/checkly-provider_10.png b/docs/images/checkly-provider_10.png new file mode 100644 index 0000000000..0628ccd7bc Binary files /dev/null and b/docs/images/checkly-provider_10.png differ diff --git a/docs/images/checkly-provider_11.png b/docs/images/checkly-provider_11.png new file mode 100644 index 0000000000..4dfa318c6f Binary files /dev/null and b/docs/images/checkly-provider_11.png differ diff --git a/docs/images/checkly-provider_12.png b/docs/images/checkly-provider_12.png new file mode 100644 index 0000000000..2fbe5fa68e Binary files /dev/null and 
b/docs/images/checkly-provider_12.png differ diff --git a/docs/images/checkly-provider_2.png b/docs/images/checkly-provider_2.png new file mode 100644 index 0000000000..0ac91e04ee Binary files /dev/null and b/docs/images/checkly-provider_2.png differ diff --git a/docs/images/checkly-provider_3.png b/docs/images/checkly-provider_3.png new file mode 100644 index 0000000000..e974e1fbb8 Binary files /dev/null and b/docs/images/checkly-provider_3.png differ diff --git a/docs/images/checkly-provider_4.png b/docs/images/checkly-provider_4.png new file mode 100644 index 0000000000..58ae31ad1d Binary files /dev/null and b/docs/images/checkly-provider_4.png differ diff --git a/docs/images/checkly-provider_5.png b/docs/images/checkly-provider_5.png new file mode 100644 index 0000000000..3b3946e92c Binary files /dev/null and b/docs/images/checkly-provider_5.png differ diff --git a/docs/images/checkly-provider_6.png b/docs/images/checkly-provider_6.png new file mode 100644 index 0000000000..71cc7483a0 Binary files /dev/null and b/docs/images/checkly-provider_6.png differ diff --git a/docs/images/checkly-provider_7.png b/docs/images/checkly-provider_7.png new file mode 100644 index 0000000000..894a5f3c33 Binary files /dev/null and b/docs/images/checkly-provider_7.png differ diff --git a/docs/images/checkly-provider_8.png b/docs/images/checkly-provider_8.png new file mode 100644 index 0000000000..1f46abf61a Binary files /dev/null and b/docs/images/checkly-provider_8.png differ diff --git a/docs/images/checkly-provider_9.png b/docs/images/checkly-provider_9.png new file mode 100644 index 0000000000..c9205b53f0 Binary files /dev/null and b/docs/images/checkly-provider_9.png differ diff --git a/docs/images/checkmk-provider_1.png b/docs/images/checkmk-provider_1.png new file mode 100644 index 0000000000..240951859a Binary files /dev/null and b/docs/images/checkmk-provider_1.png differ diff --git a/docs/images/checkmk-provider_2.png b/docs/images/checkmk-provider_2.png new file mode 
100644 index 0000000000..440bed9e63 Binary files /dev/null and b/docs/images/checkmk-provider_2.png differ diff --git a/docs/images/checkmk-provider_3.png b/docs/images/checkmk-provider_3.png new file mode 100644 index 0000000000..7de3bf12d9 Binary files /dev/null and b/docs/images/checkmk-provider_3.png differ diff --git a/docs/images/checkmk-provider_4.png b/docs/images/checkmk-provider_4.png new file mode 100644 index 0000000000..8edd50a8c2 Binary files /dev/null and b/docs/images/checkmk-provider_4.png differ diff --git a/docs/images/cilium_topology_map.png b/docs/images/cilium_topology_map.png new file mode 100644 index 0000000000..05a4c3c1e7 Binary files /dev/null and b/docs/images/cilium_topology_map.png differ diff --git a/docs/images/connect-to-pagerduty.png b/docs/images/connect-to-pagerduty.png new file mode 100644 index 0000000000..f8eeebd879 Binary files /dev/null and b/docs/images/connect-to-pagerduty.png differ diff --git a/docs/images/coralogix-provider_1.png b/docs/images/coralogix-provider_1.png new file mode 100644 index 0000000000..b6965352d2 Binary files /dev/null and b/docs/images/coralogix-provider_1.png differ diff --git a/docs/images/coralogix-provider_2.png b/docs/images/coralogix-provider_2.png new file mode 100644 index 0000000000..2a127a10f2 Binary files /dev/null and b/docs/images/coralogix-provider_2.png differ diff --git a/docs/images/coralogix-provider_3.png b/docs/images/coralogix-provider_3.png new file mode 100644 index 0000000000..44efcf61c8 Binary files /dev/null and b/docs/images/coralogix-provider_3.png differ diff --git a/docs/images/coralogix-provider_4.png b/docs/images/coralogix-provider_4.png new file mode 100644 index 0000000000..1038a0b91b Binary files /dev/null and b/docs/images/coralogix-provider_4.png differ diff --git a/docs/images/coralogix-provider_5.png b/docs/images/coralogix-provider_5.png new file mode 100644 index 0000000000..dbc7081e80 Binary files /dev/null and b/docs/images/coralogix-provider_5.png differ 
diff --git a/docs/images/coralogix-provider_6.png b/docs/images/coralogix-provider_6.png new file mode 100644 index 0000000000..f675d0102f Binary files /dev/null and b/docs/images/coralogix-provider_6.png differ diff --git a/docs/images/correlation-topology.png b/docs/images/correlation-topology.png new file mode 100644 index 0000000000..560cd36a9f Binary files /dev/null and b/docs/images/correlation-topology.png differ diff --git a/docs/images/correlation.png b/docs/images/correlation.png new file mode 100644 index 0000000000..afc4c04e1e Binary files /dev/null and b/docs/images/correlation.png differ diff --git a/docs/images/dash0-provider_1.png b/docs/images/dash0-provider_1.png new file mode 100644 index 0000000000..e4b4d3408f Binary files /dev/null and b/docs/images/dash0-provider_1.png differ diff --git a/docs/images/dash0-provider_2.png b/docs/images/dash0-provider_2.png new file mode 100644 index 0000000000..f30e5a3615 Binary files /dev/null and b/docs/images/dash0-provider_2.png differ diff --git a/docs/images/dash0-provider_3.png b/docs/images/dash0-provider_3.png new file mode 100644 index 0000000000..2ee2b19784 Binary files /dev/null and b/docs/images/dash0-provider_3.png differ diff --git a/docs/images/dash0-provider_4.png b/docs/images/dash0-provider_4.png new file mode 100644 index 0000000000..9eeb05532f Binary files /dev/null and b/docs/images/dash0-provider_4.png differ diff --git a/docs/images/dash0-provider_5.png b/docs/images/dash0-provider_5.png new file mode 100644 index 0000000000..fed83c3c12 Binary files /dev/null and b/docs/images/dash0-provider_5.png differ diff --git a/docs/images/dash0-provider_6.png b/docs/images/dash0-provider_6.png new file mode 100644 index 0000000000..114281b557 Binary files /dev/null and b/docs/images/dash0-provider_6.png differ diff --git a/docs/images/datadog_raw_alerts.txt b/docs/images/datadog_raw_alerts.txt new file mode 100644 index 0000000000..f64bb9470e --- /dev/null +++ b/docs/images/datadog_raw_alerts.txt 
@@ -0,0 +1,44 @@ +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:05:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-unique-id&from_ts=1733928722000&to_ts=1733929922000&event_id=7879702138782271851&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733929322000&to_ts=1733929622000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733929712000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-feature-unique-id}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733929712000", "scopes": "service:keep-api-feature-unique-id", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879702138782271851", "tags": "monitor,service:keep-api-feature-unique-id", "id": "7879702138782271851", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 
\n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:05:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-historical-rules-poc&from_ts=1733928722000&to_ts=1733929922000&event_id=7879702138713295486&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733929322000&to_ts=1733929622000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733929712000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-feature-historical-rules-poc}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733929712000", "scopes": "service:keep-api-feature-historical-rules-poc", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879702138713295486", "tags": "monitor,service:keep-api-feature-historical-rules-poc", "id": "7879702138713295486", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 
\n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:05:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-grafana-legacy&from_ts=1733928842000&to_ts=1733930042000&event_id=7879704162663906513&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733929442000&to_ts=1733929742000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733929833000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-feature-grafana-legacy}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733929833000", "scopes": "service:keep-api-feature-grafana-legacy", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879704162663906513", "tags": "monitor,service:keep-api-feature-grafana-legacy", "id": "7879704162663906513", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: 
**[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:05:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-fix-2804-unlink-alert&from_ts=1733928902000&to_ts=1733930102000&event_id=7879705155994930207&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733929502000&to_ts=1733929802000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733929892000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-fix-2804-unlink-alert}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733929892000", "scopes": "service:keep-api-fix-2804-unlink-alert", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879705155994930207", "tags": "monitor,service:keep-api-fix-2804-unlink-alert", "id": "7879705155994930207", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered 
at Wed Dec 11 2024 15:14:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-matvey-kuk-workflows-fix&from_ts=1733929142000&to_ts=1733930342000&event_id=7879709198622396010&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733929742000&to_ts=1733930042000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930133000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-matvey-kuk-workflows-fix}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930133000", "scopes": "service:keep-api-matvey-kuk-workflows-fix", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879709198622396010", "tags": "monitor,service:keep-api-matvey-kuk-workflows-fix", "id": "7879709198622396010", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:14:02 UTC.\n\n- - -\n\n[[Monitor 
Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-bugfix-yaml&from_ts=1733929142000&to_ts=1733930342000&event_id=7879709199720965710&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733929742000&to_ts=1733930042000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930133000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-bugfix-yaml}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930133000", "scopes": "service:keep-api-bugfix-yaml", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879709199720965710", "tags": "monitor,service:keep-api-bugfix-yaml", "id": "7879709199720965710", "monitor_id": "160076582"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`Aborted connection`](https://app.datadoghq.com/logs/analytics?query=Aborted+connection&agg_m=count&agg_t=count&agg_q=database_id&index=%2A)** by **database_id**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:14:04 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077064?group=database_id%3Akeephq-sandbox%3Akeep&from_ts=1733929144000&to_ts=1733930344000&event_id=7879709234193994156&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077064/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=Aborted+connection&from_ts=1733929744000&to_ts=1733930044000&live=false&agg_m=count&agg_t=count&agg_q=database_id&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930135000", "event_type": "log_alert", "title": "[Triggered on {database_id:keephq-sandbox:keep}] Somethine weird in DB", "severity": "", "alert_type": "error", "alert_query": "logs(\"Aborted connection\").index(\"*\").rollup(\"count\").by(\"database_id\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930135000", "scopes": "database_id:keephq-sandbox:keep", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879709234193994156", "tags": "database_id:keephq-sandbox:keep,monitor", "id": "7879709234193994156", "monitor_id": "160077064"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:15:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-grafana-legacy&from_ts=1733929202000&to_ts=1733930402000&event_id=7879710212645433433&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733929802000&to_ts=1733930102000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930194000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-feature-grafana-legacy}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930194000", "scopes": "service:keep-api-feature-grafana-legacy", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879710212645433433", "tags": "monitor,service:keep-api-feature-grafana-legacy", "id": "7879710212645433433", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:17:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-improvedocs&from_ts=1733929322000&to_ts=1733930522000&event_id=7879712214248911237&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733929922000&to_ts=1733930222000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930313000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-feature-improvedocs}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930313000", "scopes": "service:keep-api-feature-improvedocs", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879712214248911237", "tags": "monitor,service:keep-api-feature-improvedocs", "id": "7879712214248911237", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:18:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-ci-2766-simple-faster-ee&from_ts=1733929382000&to_ts=1733930582000&event_id=7879713295639221277&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733929982000&to_ts=1733930282000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930377000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-ci-2766-simple-faster-ee}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930377000", "scopes": "service:keep-api-ci-2766-simple-faster-ee", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879713295639221277", "tags": "monitor,service:keep-api-ci-2766-simple-faster-ee", "id": "7879713295639221277", "monitor_id": "160076582"} +{"body": "%%%\nhttps://app.datadoghq.com/logs/analytics?query=%40http.status_code%3A%28401+OR+403%29&from_ts=1733929988000&to_ts=1733930288000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&event=AwAAAZO2TBElyb4KmQAAABhBWk8yVEJRZkFBQWozamRiYkp3THZBQUEAAAAkMDE5M2I2NGMtMjI3My00YzM0LThhOGUtNGM0MzllMDliNTkyAAAA0g \n\n @webhook-keep-datadog-webhook-integration-keep @webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 @webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375 @webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2\n\nMore than **0.0** log events matched in the last **5m** against the monitored query: **[`@http.status_code:(401 OR 403)`](https://app.datadoghq.com/logs/analytics?query=%40http.status_code%3A%28401+OR+403%29&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:18:08 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/134462228?group=service%3Akeep-api&from_ts=1733929388000&to_ts=1733930588000&event_id=7879713311244615328&link_source=monitor_notif)] 
\u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/134462228/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=%40http.status_code%3A%28401+OR+403%29&from_ts=1733929988000&to_ts=1733930288000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930378000", "event_type": "log_alert", "title": "[P2] [Warn] Unauthorized access to API keep-api", "severity": "P2", "alert_type": "warning", "alert_query": "logs(\"@http.status_code:(401 OR 403)\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 5", "alert_transition": "Warn", "date": "1733930378000", "scopes": "service:keep-api", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879713311244615328", "tags": "environment:production,monitor,service:keep-api", "id": "7879713311244615328", "monitor_id": "134462228"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:18:41 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-feature-improvedocs&from_ts=1733929421000&to_ts=1733930621000&event_id=7879713879533759147&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930021000&to_ts=1733930321000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930412000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-feature-improvedocs}] 
OperationalError DB", "severity": "", "alert_type": "error", "alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930412000", "scopes": "service:keep-api-feature-improvedocs", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879713879533759147", "tags": "monitor,service:keep-api-feature-improvedocs", "id": "7879713879533759147", "monitor_id": "160077341"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:18:41 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-feature-grafana-legacy&from_ts=1733929421000&to_ts=1733930621000&event_id=7879713877056374717&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930021000&to_ts=1733930321000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930412000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-feature-grafana-legacy}] OperationalError DB", "severity": "", "alert_type": "error", "alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930412000", "scopes": "service:keep-api-feature-grafana-legacy", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879713877056374717", "tags": 
"monitor,service:keep-api-feature-grafana-legacy", "id": "7879713877056374717", "monitor_id": "160077341"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:18:41 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-feature-grafana-legacy&from_ts=1733929541000&to_ts=1733930741000&event_id=7879715880809613521&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930141000&to_ts=1733930441000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930532000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-feature-grafana-legacy}] OperationalError DB", "severity": "", "alert_type": "success", "alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930532000", "scopes": "service:keep-api-feature-grafana-legacy", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879715880809613521", "tags": "monitor,service:keep-api-feature-grafana-legacy", "id": "7879715880809613521", "monitor_id": "160077341"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 
\n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:15:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-grafana-legacy&from_ts=1733929562000&to_ts=1733930762000&event_id=7879716233501717500&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930162000&to_ts=1733930462000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930553000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-feature-grafana-legacy}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930553000", "scopes": "service:keep-api-feature-grafana-legacy", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879716233501717500", "tags": "monitor,service:keep-api-feature-grafana-legacy", "id": "7879716233501717500", "monitor_id": "160076582"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:18:41 UTC.\n\n- - -\n\n[[Monitor 
Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-feature-improvedocs&from_ts=1733929541000&to_ts=1733930741000&event_id=7879715879428238181&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930141000&to_ts=1733930441000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930531000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-feature-improvedocs}] OperationalError DB", "severity": "", "alert_type": "success", "alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930531000", "scopes": "service:keep-api-feature-improvedocs", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879715879428238181", "tags": "monitor,service:keep-api-feature-improvedocs", "id": "7879715879428238181", "monitor_id": "160077341"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:05:02 UTC.\n\n- - -\n\n[[Monitor 
Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api&from_ts=1733929502000&to_ts=1733930702000&event_id=7879715220610804188&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930102000&to_ts=1733930402000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930492000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930492000", "scopes": "service:keep-api", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879715220610804188", "tags": "monitor,service:keep-api", "id": "7879715220610804188", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:21:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api&from_ts=1733929562000&to_ts=1733930762000&event_id=7879716234457967220&link_source=monitor_notif)] \u00b7 [[Edit 
Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930162000&to_ts=1733930462000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930553000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930553000", "scopes": "service:keep-api", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879716234457967220", "tags": "monitor,service:keep-api", "id": "7879716234457967220", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:22:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-fix-2732-bug-duplicate-entry-for-key-lastalertprimary&from_ts=1733929622000&to_ts=1733930822000&event_id=7879717268154896752&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930222000&to_ts=1733930522000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930614000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-fix-2732-bug-duplicate-entry-for-key-lastalertprimary}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930614000", "scopes": "service:keep-api-fix-2732-bug-duplicate-entry-for-key-lastalertprimary", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879717268154896752", "tags": "monitor,service:keep-api-fix-2732-bug-duplicate-entry-for-key-lastalertprimary", "id": "7879717268154896752", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:18:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-ci-2766-simple-faster-ee&from_ts=1733929622000&to_ts=1733930822000&event_id=7879717259407496838&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930222000&to_ts=1733930522000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930614000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-ci-2766-simple-faster-ee}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930614000", "scopes": "service:keep-api-ci-2766-simple-faster-ee", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879717259407496838", "tags": "monitor,service:keep-api-ci-2766-simple-faster-ee", "id": "7879717259407496838", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:22:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-matvey-kuk-workflows-fix&from_ts=1733929622000&to_ts=1733930822000&event_id=7879717254454313398&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930222000&to_ts=1733930522000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930613000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-matvey-kuk-workflows-fix}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930613000", "scopes": "service:keep-api-matvey-kuk-workflows-fix", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879717254454313398", "tags": "monitor,service:keep-api-matvey-kuk-workflows-fix", "id": "7879717254454313398", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:17:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-improvedocs&from_ts=1733929682000&to_ts=1733930882000&event_id=7879718246849246186&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930282000&to_ts=1733930582000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930673000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-feature-improvedocs}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930673000", "scopes": "service:keep-api-feature-improvedocs", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879718246849246186", "tags": "monitor,service:keep-api-feature-improvedocs", "id": "7879718246849246186", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:23:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-unique-id&from_ts=1733929682000&to_ts=1733930882000&event_id=7879718271367299818&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930282000&to_ts=1733930582000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930674000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-feature-unique-id}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930674000", "scopes": "service:keep-api-feature-unique-id", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879718271367299818", "tags": "monitor,service:keep-api-feature-unique-id", "id": "7879718271367299818", "monitor_id": "160076582"} +{"body": "%%%\n \n\n @webhook-keep-datadog-webhook-integration-keep @webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 @webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375 @webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2\n\nLess than **0.0** log events matched in the last **5m** against the monitored query: **[`@http.status_code:(401 OR 403)`](https://app.datadoghq.com/logs/analytics?query=%40http.status_code%3A%28401+OR+403%29&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:18:08 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/134462228?group=service%3Akeep-api&from_ts=1733929688000&to_ts=1733930888000&event_id=7879718351573282995&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/134462228/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=%40http.status_code%3A%28401+OR+403%29&from_ts=1733930288000&to_ts=1733930588000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930679000", "event_type": "log_alert", "title": "[P2] [Recovered] Unauthorized access to API ", "severity": "P2", "alert_type": "success", "alert_query": "logs(\"@http.status_code:(401 OR 403)\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 5", "alert_transition": "Recovered", "date": "1733930679000", "scopes": "service:keep-api", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879718351573282995", "tags": "environment:production,monitor,service:keep-api", "id": "7879718351573282995", "monitor_id": "134462228"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:24:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-fix-2780-bug-incidents-nonetype-object-is-not-iterable&from_ts=1733929742000&to_ts=1733930942000&event_id=7879719249416290957&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930342000&to_ts=1733930642000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930732000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-fix-2780-bug-incidents-nonetype-object-is-not-iterable}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930732000", "scopes": "service:keep-api-fix-2780-bug-incidents-nonetype-object-is-not-iterable", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879719249416290957", "tags": "monitor,service:keep-api-fix-2780-bug-incidents-nonetype-object-is-not-iterable", "id": "7879719249416290957", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:24:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-historical-rules-poc&from_ts=1733929742000&to_ts=1733930942000&event_id=7879719250013996135&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930342000&to_ts=1733930642000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930732000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-feature-historical-rules-poc}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930732000", "scopes": "service:keep-api-feature-historical-rules-poc", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879719250013996135", "tags": "monitor,service:keep-api-feature-historical-rules-poc", "id": "7879719250013996135", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:24:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-fix-2804-unlink-alert&from_ts=1733929742000&to_ts=1733930942000&event_id=7879719251943375976&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related 
Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930342000&to_ts=1733930642000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930732000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-fix-2804-unlink-alert}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930732000", "scopes": "service:keep-api-fix-2804-unlink-alert", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879719251943375976", "tags": "monitor,service:keep-api-fix-2804-unlink-alert", "id": "7879719251943375976", "monitor_id": "160076582"} +{"body": "%%%\nhttps://app.datadoghq.com/logs/analytics?query=%40http.status_code%3A%28401+OR+403%29&from_ts=1733930348000&to_ts=1733930648000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&event=AwAAAZO2UazNewhzMQAAABhBWk8yVWFfSUFBQUtSTVFERHFvenF3QUEAAAAkMDE5M2I2NTEtYzYzNi00MDYyLThhMzAtYTMyZTEyNzY3ZWM2AABZ8g \n\n @webhook-keep-datadog-webhook-integration-keep @webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 @webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375 @webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2\n\nMore than **5** log events matched in the last **5m** against the monitored query: **[`@http.status_code:(401 OR 403)`](https://app.datadoghq.com/logs/analytics?query=%40http.status_code%3A%28401+OR+403%29&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:24:08 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/134462228?group=service%3Akeep-api&from_ts=1733929748000&to_ts=1733930948000&event_id=7879719351243698466&link_source=monitor_notif)] \u00b7 
[[Edit Monitor](https://app.datadoghq.com/monitors/134462228/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=%40http.status_code%3A%28401+OR+403%29&from_ts=1733930348000&to_ts=1733930648000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930738000", "event_type": "log_alert", "title": "[P2] [Triggered] Unauthorized access to API keep-api", "severity": "P2", "alert_type": "error", "alert_query": "logs(\"@http.status_code:(401 OR 403)\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 5", "alert_transition": "Triggered", "date": "1733930738000", "scopes": "service:keep-api", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879719351243698466", "tags": "environment:production,monitor,service:keep-api", "id": "7879719351243698466", "monitor_id": "134462228"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:24:41 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-feature-unique-id&from_ts=1733929781000&to_ts=1733930981000&event_id=7879719917965975808&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930381000&to_ts=1733930681000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930772000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-feature-unique-id}] 
OperationalError DB", "severity": "", "alert_type": "error", "alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930772000", "scopes": "service:keep-api-feature-unique-id", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879719917965975808", "tags": "monitor,service:keep-api-feature-unique-id", "id": "7879719917965975808", "monitor_id": "160077341"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:25:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-ci-2766-simple-faster-ee&from_ts=1733929802000&to_ts=1733931002000&event_id=7879720264592758877&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930402000&to_ts=1733930702000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930793000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-ci-2766-simple-faster-ee}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", 
"alert_transition": "Triggered", "date": "1733930793000", "scopes": "service:keep-api-ci-2766-simple-faster-ee", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879720264592758877", "tags": "monitor,service:keep-api-ci-2766-simple-faster-ee", "id": "7879720264592758877", "monitor_id": "160076582"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:25:41 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-ci-2766-simple-faster-ee&from_ts=1733929841000&to_ts=1733931041000&event_id=7879720914500490069&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930441000&to_ts=1733930741000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930832000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-ci-2766-simple-faster-ee}] OperationalError DB", "severity": "", "alert_type": "error", "alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930832000", "scopes": "service:keep-api-ci-2766-simple-faster-ee", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879720914500490069", "tags": "monitor,service:keep-api-ci-2766-simple-faster-ee", "id": "7879720914500490069", "monitor_id": "160077341"} +{"body": 
"%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:25:41 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-fix-2780-bug-incidents-nonetype-object-is-not-iterable&from_ts=1733929841000&to_ts=1733931041000&event_id=7879720915197393266&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930441000&to_ts=1733930741000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930832000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-fix-2780-bug-incidents-nonetype-object-is-not-iterable}] OperationalError DB", "severity": "", "alert_type": "error", "alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930832000", "scopes": "service:keep-api-fix-2780-bug-incidents-nonetype-object-is-not-iterable", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879720915197393266", "tags": "monitor,service:keep-api-fix-2780-bug-incidents-nonetype-object-is-not-iterable", "id": "7879720915197393266", "monitor_id": "160077341"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by 
**service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:25:41 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-fix-2804-unlink-alert&from_ts=1733929841000&to_ts=1733931041000&event_id=7879720931357701015&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930441000&to_ts=1733930741000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930833000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-fix-2804-unlink-alert}] OperationalError DB", "severity": "", "alert_type": "error", "alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930833000", "scopes": "service:keep-api-fix-2804-unlink-alert", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879720931357701015", "tags": "monitor,service:keep-api-fix-2804-unlink-alert", "id": "7879720931357701015", "monitor_id": "160077341"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:22:02 UTC.\n\n- - -\n\n[[Monitor 
Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-fix-2732-bug-duplicate-entry-for-key-lastalertprimary&from_ts=1733929862000&to_ts=1733931062000&event_id=7879721270410105860&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930462000&to_ts=1733930762000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930853000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-fix-2732-bug-duplicate-entry-for-key-lastalertprimary}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930853000", "scopes": "service:keep-api-fix-2732-bug-duplicate-entry-for-key-lastalertprimary", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879721270410105860", "tags": "monitor,service:keep-api-fix-2732-bug-duplicate-entry-for-key-lastalertprimary", "id": "7879721270410105860", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:14:02 UTC.\n\n- - -\n\n[[Monitor 
Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-bugfix-yaml&from_ts=1733929862000&to_ts=1733931062000&event_id=7879721272152616299&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930462000&to_ts=1733930762000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930853000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-bugfix-yaml}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930853000", "scopes": "service:keep-api-bugfix-yaml", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879721272152616299", "tags": "monitor,service:keep-api-bugfix-yaml", "id": "7879721272152616299", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:21:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api&from_ts=1733929862000&to_ts=1733931062000&event_id=7879721270911102314&link_source=monitor_notif)] \u00b7 [[Edit 
Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930462000&to_ts=1733930762000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930853000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930853000", "scopes": "service:keep-api", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879721270911102314", "tags": "monitor,service:keep-api", "id": "7879721270911102314", "monitor_id": "160076582"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:18:41 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-matvey-kuk-workflows-fix&from_ts=1733929901000&to_ts=1733931101000&event_id=7879721950125022979&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930501000&to_ts=1733930801000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930893000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-matvey-kuk-workflows-fix}] OperationalError DB", "severity": "", "alert_type": "success", 
"alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930893000", "scopes": "service:keep-api-matvey-kuk-workflows-fix", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879721950125022979", "tags": "monitor,service:keep-api-matvey-kuk-workflows-fix", "id": "7879721950125022979", "monitor_id": "160077341"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:22:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-matvey-kuk-workflows-fix&from_ts=1733929922000&to_ts=1733931122000&event_id=7879722272469287876&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930522000&to_ts=1733930822000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930912000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-matvey-kuk-workflows-fix}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930912000", 
"scopes": "service:keep-api-matvey-kuk-workflows-fix", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879722272469287876", "tags": "monitor,service:keep-api-matvey-kuk-workflows-fix", "id": "7879722272469287876", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:27:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-improvedocs&from_ts=1733929922000&to_ts=1733931122000&event_id=7879722274337387922&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930522000&to_ts=1733930822000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930913000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-feature-improvedocs}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930913000", "scopes": "service:keep-api-feature-improvedocs", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879722274337387922", "tags": 
"monitor,service:keep-api-feature-improvedocs", "id": "7879722274337387922", "monitor_id": "160076582"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nMore than **0** log events matched in the last **5m** against the monitored query: **[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:27:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-bugfix-yaml-width&from_ts=1733929922000&to_ts=1733931122000&event_id=7879722275107215736&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930522000&to_ts=1733930822000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930913000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-bugfix-yaml-width}] Error monitor", "severity": "", "alert_type": "error", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930913000", "scopes": "service:keep-api-bugfix-yaml-width", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879722275107215736", "tags": "monitor,service:keep-api-bugfix-yaml-width", "id": "7879722275107215736", "monitor_id": "160076582"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nMore than **0** log events 
matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:27:41 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-feature-improvedocs&from_ts=1733929961000&to_ts=1733931161000&event_id=7879722937552678433&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930561000&to_ts=1733930861000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930952000", "event_type": "log_alert", "title": "[Triggered on {service:keep-api-feature-improvedocs}] OperationalError DB", "severity": "", "alert_type": "error", "alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Triggered", "date": "1733930952000", "scopes": "service:keep-api-feature-improvedocs", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879722937552678433", "tags": "monitor,service:keep-api-feature-improvedocs", "id": "7879722937552678433", "monitor_id": "160077341"} +{"body": "%%%\ntrace_id: \ntags: \nattributes: \n\n@webhook-keep-datadog-webhook-integration-keep \n@webhook-keep-datadog-webhook-integration-78645c69-61e9-4921-8e90-b1ae382280e5 \n@webhook-keep-datadog-webhook-integration-9ffb1c58-bd2b-4b2e-ad76-575caf43f5d2 \n@webhook-keep-datadog-webhook-integration-2f82730d-4cb5-466d-81b1-1aecb316f375\n\nLess than **0** log events matched in the last **5m** against the monitored query: 
**[`status:error`](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:23:02 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160076582?group=service%3Akeep-api-feature-unique-id&from_ts=1733929982000&to_ts=1733931182000&event_id=7879723300958553556&link_source=monitor_notif)] \u00b7 [[Edit Monitor](https://app.datadoghq.com/monitors/160076582/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=status%3Aerror&from_ts=1733930582000&to_ts=1733930882000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733930974000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-feature-unique-id}] Error monitor", "severity": "", "alert_type": "success", "alert_query": "logs(\"status:error\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733930974000", "scopes": "service:keep-api-feature-unique-id", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879723300958553556", "tags": "monitor,service:keep-api-feature-unique-id", "id": "7879723300958553556", "monitor_id": "160076582"} +{"body": "%%%\n@webhook-keep-datadog-webhook-integration-keep\n\nLess than **0** log events matched in the last **5m** against the monitored query: **[`err.OperationalError`](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&agg_m=count&agg_t=count&agg_q=service&index=%2A)** by **service**\n\nThe monitor was last triggered at Wed Dec 11 2024 15:24:41 UTC.\n\n- - -\n\n[[Monitor Status](https://app.datadoghq.com/monitors/160077341?group=service%3Akeep-api-feature-unique-id&from_ts=1733930021000&to_ts=1733931221000&event_id=7879723934280491883&link_source=monitor_notif)] \u00b7 [[Edit 
Monitor](https://app.datadoghq.com/monitors/160077341/edit?link_source=monitor_notif)] \u00b7 [[Related Logs](https://app.datadoghq.com/logs/analytics?query=err.OperationalError&from_ts=1733930621000&to_ts=1733930921000&live=false&agg_m=count&agg_t=count&agg_q=service&index=%2A&link_source=monitor_notif)]\n%%%", "last_updated": "1733931012000", "event_type": "log_alert", "title": "[Recovered on {service:keep-api-feature-unique-id}] OperationalError DB", "severity": "", "alert_type": "success", "alert_query": "logs(\"err.OperationalError\").index(\"*\").rollup(\"count\").by(\"service\").last(\"5m\") > 0", "alert_transition": "Recovered", "date": "1733931012000", "scopes": "service:keep-api-feature-unique-id", "org": {"id": "831563", "name": "DPN | KeepHQ"}, "url": "https://app.datadoghq.com/event/event?id=7879723934280491883", "tags": "monitor,service:keep-api-feature-unique-id", "id": "7879723934280491883", "monitor_id": "160077341"} +sqlite> diff --git a/docs/images/dbauth.png b/docs/images/dbauth.png new file mode 100644 index 0000000000..bd46933bdb Binary files /dev/null and b/docs/images/dbauth.png differ diff --git a/docs/images/deduplication.png b/docs/images/deduplication.png new file mode 100644 index 0000000000..5d221a7549 Binary files /dev/null and b/docs/images/deduplication.png differ diff --git a/docs/images/extraction.png b/docs/images/extraction.png new file mode 100644 index 0000000000..83473d5e98 Binary files /dev/null and b/docs/images/extraction.png differ diff --git a/docs/images/faq/faq-browser-settings.png b/docs/images/faq/faq-browser-settings.png new file mode 100644 index 0000000000..5a0db7ec27 Binary files /dev/null and b/docs/images/faq/faq-browser-settings.png differ diff --git a/docs/images/faq/faq-clipboard-blocked.png b/docs/images/faq/faq-clipboard-blocked.png new file mode 100644 index 0000000000..982195ac63 Binary files /dev/null and b/docs/images/faq/faq-clipboard-blocked.png differ diff --git a/docs/images/flashduty_1.png 
b/docs/images/flashduty_1.png new file mode 100644 index 0000000000..a6d938bebd Binary files /dev/null and b/docs/images/flashduty_1.png differ diff --git a/docs/images/flashduty_2.png b/docs/images/flashduty_2.png new file mode 100644 index 0000000000..8bb3d2a2d2 Binary files /dev/null and b/docs/images/flashduty_2.png differ diff --git a/docs/images/flashduty_3.png b/docs/images/flashduty_3.png new file mode 100644 index 0000000000..127ee9ac63 Binary files /dev/null and b/docs/images/flashduty_3.png differ diff --git a/docs/images/gke.png b/docs/images/gke.png new file mode 100644 index 0000000000..9050f17b49 Binary files /dev/null and b/docs/images/gke.png differ diff --git a/docs/images/gke2.png b/docs/images/gke2.png new file mode 100644 index 0000000000..ad106654f5 Binary files /dev/null and b/docs/images/gke2.png differ diff --git a/docs/images/incident_1.png b/docs/images/incident_1.png new file mode 100644 index 0000000000..ca0cd85a27 Binary files /dev/null and b/docs/images/incident_1.png differ diff --git a/docs/images/incident_activity.png b/docs/images/incident_activity.png new file mode 100644 index 0000000000..16cc3a5f77 Binary files /dev/null and b/docs/images/incident_activity.png differ diff --git a/docs/images/incident_copilot.png b/docs/images/incident_copilot.png new file mode 100644 index 0000000000..c2de0983cc Binary files /dev/null and b/docs/images/incident_copilot.png differ diff --git a/docs/images/incident_edit.png b/docs/images/incident_edit.png new file mode 100644 index 0000000000..16384010b2 Binary files /dev/null and b/docs/images/incident_edit.png differ diff --git a/docs/images/incident_service.png b/docs/images/incident_service.png new file mode 100644 index 0000000000..efa64f73ad Binary files /dev/null and b/docs/images/incident_service.png differ diff --git a/docs/images/incident_timeline.png b/docs/images/incident_timeline.png new file mode 100644 index 0000000000..dfecbfd661 Binary files /dev/null and 
b/docs/images/incident_timeline.png differ diff --git a/docs/images/incident_workflow.png b/docs/images/incident_workflow.png new file mode 100644 index 0000000000..b284e0d1a7 Binary files /dev/null and b/docs/images/incident_workflow.png differ diff --git a/docs/images/incidents/add_facet_for_incident.png b/docs/images/incidents/add_facet_for_incident.png new file mode 100644 index 0000000000..ae7066b46a Binary files /dev/null and b/docs/images/incidents/add_facet_for_incident.png differ diff --git a/docs/images/keep-apikey-1.png b/docs/images/keep-apikey-1.png new file mode 100644 index 0000000000..ec6671adb5 Binary files /dev/null and b/docs/images/keep-apikey-1.png differ diff --git a/docs/images/keep-apikey-2.png b/docs/images/keep-apikey-2.png new file mode 100644 index 0000000000..e3e45981c4 Binary files /dev/null and b/docs/images/keep-apikey-2.png differ diff --git a/docs/images/keep-apikey-3.png b/docs/images/keep-apikey-3.png new file mode 100644 index 0000000000..25b7e0cda4 Binary files /dev/null and b/docs/images/keep-apikey-3.png differ diff --git a/docs/images/keep-apikey-4.png b/docs/images/keep-apikey-4.png new file mode 100644 index 0000000000..c0920bd675 Binary files /dev/null and b/docs/images/keep-apikey-4.png differ diff --git a/docs/images/keycloakauth.png b/docs/images/keycloakauth.png new file mode 100644 index 0000000000..2db67636dc Binary files /dev/null and b/docs/images/keycloakauth.png differ diff --git a/docs/images/librenms-provider_1.png b/docs/images/librenms-provider_1.png new file mode 100644 index 0000000000..3ba17d9e76 Binary files /dev/null and b/docs/images/librenms-provider_1.png differ diff --git a/docs/images/librenms-provider_2.png b/docs/images/librenms-provider_2.png new file mode 100644 index 0000000000..126984c858 Binary files /dev/null and b/docs/images/librenms-provider_2.png differ diff --git a/docs/images/librenms-provider_3.png b/docs/images/librenms-provider_3.png new file mode 100644 index 
0000000000..6c12a1afa4 Binary files /dev/null and b/docs/images/librenms-provider_3.png differ diff --git a/docs/images/librenms-provider_4.png b/docs/images/librenms-provider_4.png new file mode 100644 index 0000000000..ce12e8fd2d Binary files /dev/null and b/docs/images/librenms-provider_4.png differ diff --git a/docs/images/librenms-provider_5.png b/docs/images/librenms-provider_5.png new file mode 100644 index 0000000000..1b1af79eb0 Binary files /dev/null and b/docs/images/librenms-provider_5.png differ diff --git a/docs/images/librenms-provider_6.png b/docs/images/librenms-provider_6.png new file mode 100644 index 0000000000..c41d82f855 Binary files /dev/null and b/docs/images/librenms-provider_6.png differ diff --git a/docs/images/linked-providers.png b/docs/images/linked-providers.png new file mode 100644 index 0000000000..904ee166cc Binary files /dev/null and b/docs/images/linked-providers.png differ diff --git a/docs/images/mailgun_email_address.png b/docs/images/mailgun_email_address.png new file mode 100644 index 0000000000..0736a67c5e Binary files /dev/null and b/docs/images/mailgun_email_address.png differ diff --git a/docs/images/mailgun_extraction.png b/docs/images/mailgun_extraction.png new file mode 100644 index 0000000000..5029f01243 Binary files /dev/null and b/docs/images/mailgun_extraction.png differ diff --git a/docs/images/maintenance-window-creation.png b/docs/images/maintenance-window-creation.png new file mode 100644 index 0000000000..dde79e05e6 Binary files /dev/null and b/docs/images/maintenance-window-creation.png differ diff --git a/docs/images/maintenance.png b/docs/images/maintenance.png new file mode 100644 index 0000000000..a0293c27b4 Binary files /dev/null and b/docs/images/maintenance.png differ diff --git a/docs/images/mapping.png b/docs/images/mapping.png new file mode 100644 index 0000000000..5202a44e84 Binary files /dev/null and b/docs/images/mapping.png differ diff --git a/docs/images/netbox-provider_1.png 
b/docs/images/netbox-provider_1.png new file mode 100644 index 0000000000..acfd117c30 Binary files /dev/null and b/docs/images/netbox-provider_1.png differ diff --git a/docs/images/netbox-provider_10.png b/docs/images/netbox-provider_10.png new file mode 100644 index 0000000000..d7412efd88 Binary files /dev/null and b/docs/images/netbox-provider_10.png differ diff --git a/docs/images/netbox-provider_2.png b/docs/images/netbox-provider_2.png new file mode 100644 index 0000000000..75a4287a1a Binary files /dev/null and b/docs/images/netbox-provider_2.png differ diff --git a/docs/images/netbox-provider_3.png b/docs/images/netbox-provider_3.png new file mode 100644 index 0000000000..a08a4a96ad Binary files /dev/null and b/docs/images/netbox-provider_3.png differ diff --git a/docs/images/netbox-provider_4.png b/docs/images/netbox-provider_4.png new file mode 100644 index 0000000000..fb55c1da12 Binary files /dev/null and b/docs/images/netbox-provider_4.png differ diff --git a/docs/images/netbox-provider_5.png b/docs/images/netbox-provider_5.png new file mode 100644 index 0000000000..59b4c2d414 Binary files /dev/null and b/docs/images/netbox-provider_5.png differ diff --git a/docs/images/netbox-provider_6.png b/docs/images/netbox-provider_6.png new file mode 100644 index 0000000000..2b68bc9fcc Binary files /dev/null and b/docs/images/netbox-provider_6.png differ diff --git a/docs/images/netbox-provider_7.png b/docs/images/netbox-provider_7.png new file mode 100644 index 0000000000..9701aa8ed1 Binary files /dev/null and b/docs/images/netbox-provider_7.png differ diff --git a/docs/images/netbox-provider_8.png b/docs/images/netbox-provider_8.png new file mode 100644 index 0000000000..d97140f154 Binary files /dev/null and b/docs/images/netbox-provider_8.png differ diff --git a/docs/images/netbox-provider_9.png b/docs/images/netbox-provider_9.png new file mode 100644 index 0000000000..7e16667795 Binary files /dev/null and b/docs/images/netbox-provider_9.png differ diff --git 
a/docs/images/opsgenie-provider_1.png b/docs/images/opsgenie-provider_1.png new file mode 100644 index 0000000000..84d0f3f963 Binary files /dev/null and b/docs/images/opsgenie-provider_1.png differ diff --git a/docs/images/opsgenie-provider_2.png b/docs/images/opsgenie-provider_2.png new file mode 100644 index 0000000000..f2eb65190e Binary files /dev/null and b/docs/images/opsgenie-provider_2.png differ diff --git a/docs/images/opsgenie-provider_3.png b/docs/images/opsgenie-provider_3.png new file mode 100644 index 0000000000..933ac79a6d Binary files /dev/null and b/docs/images/opsgenie-provider_3.png differ diff --git a/docs/images/opsgenie-provider_4.png b/docs/images/opsgenie-provider_4.png new file mode 100644 index 0000000000..cdab895d50 Binary files /dev/null and b/docs/images/opsgenie-provider_4.png differ diff --git a/docs/images/pagerduty-account-scope.png b/docs/images/pagerduty-account-scope.png new file mode 100644 index 0000000000..845ae8357d Binary files /dev/null and b/docs/images/pagerduty-account-scope.png differ diff --git a/docs/images/pagerduty-app-registration.png b/docs/images/pagerduty-app-registration.png new file mode 100644 index 0000000000..f7b0493579 Binary files /dev/null and b/docs/images/pagerduty-app-registration.png differ diff --git a/docs/images/pagerduty-oauth2-credentials.png b/docs/images/pagerduty-oauth2-credentials.png new file mode 100644 index 0000000000..653b55e190 Binary files /dev/null and b/docs/images/pagerduty-oauth2-credentials.png differ diff --git a/docs/images/pagerduty-redirect-url.png b/docs/images/pagerduty-redirect-url.png new file mode 100644 index 0000000000..b624f51903 Binary files /dev/null and b/docs/images/pagerduty-redirect-url.png differ diff --git a/docs/images/pagerduty-service-scope.png b/docs/images/pagerduty-service-scope.png new file mode 100644 index 0000000000..9677399236 Binary files /dev/null and b/docs/images/pagerduty-service-scope.png differ diff --git a/docs/images/playground.png 
b/docs/images/playground.png new file mode 100644 index 0000000000..4afd73ba3f Binary files /dev/null and b/docs/images/playground.png differ diff --git a/docs/images/provider-methods-assistant.png b/docs/images/provider-methods-assistant.png new file mode 100644 index 0000000000..c5d39d8599 Binary files /dev/null and b/docs/images/provider-methods-assistant.png differ diff --git a/docs/images/provider-methods-menu.png b/docs/images/provider-methods-menu.png new file mode 100644 index 0000000000..ebd65ac54b Binary files /dev/null and b/docs/images/provider-methods-menu.png differ diff --git a/docs/images/provider-methods-modal.png b/docs/images/provider-methods-modal.png new file mode 100644 index 0000000000..d2db2fef51 Binary files /dev/null and b/docs/images/provider-methods-modal.png differ diff --git a/docs/images/pulling-enabled.png b/docs/images/pulling-enabled.png new file mode 100644 index 0000000000..250113dd33 Binary files /dev/null and b/docs/images/pulling-enabled.png differ diff --git a/docs/images/pushing-enabled.png b/docs/images/pushing-enabled.png new file mode 100644 index 0000000000..22d58c5079 Binary files /dev/null and b/docs/images/pushing-enabled.png differ diff --git a/docs/images/sentry-create-integration.png b/docs/images/sentry-create-integration.png new file mode 100644 index 0000000000..0db7c43689 Binary files /dev/null and b/docs/images/sentry-create-integration.png differ diff --git a/docs/images/sentry-indicative-name.png b/docs/images/sentry-indicative-name.png new file mode 100644 index 0000000000..a8aafd5bab Binary files /dev/null and b/docs/images/sentry-indicative-name.png differ diff --git a/docs/images/sentry-internal-integration.png b/docs/images/sentry-internal-integration.png new file mode 100644 index 0000000000..3a6d7448f4 Binary files /dev/null and b/docs/images/sentry-internal-integration.png differ diff --git a/docs/images/sentry-new-token.png b/docs/images/sentry-new-token.png new file mode 100644 index 
0000000000..b48127bd69 Binary files /dev/null and b/docs/images/sentry-new-token.png differ diff --git a/docs/images/sentry-save-changes.png b/docs/images/sentry-save-changes.png new file mode 100644 index 0000000000..18a5068265 Binary files /dev/null and b/docs/images/sentry-save-changes.png differ diff --git a/docs/images/sentry-token.png b/docs/images/sentry-token.png new file mode 100644 index 0000000000..df65bed583 Binary files /dev/null and b/docs/images/sentry-token.png differ diff --git a/docs/images/servicetopology.png b/docs/images/servicetopology.png new file mode 100644 index 0000000000..dc822bcb39 Binary files /dev/null and b/docs/images/servicetopology.png differ diff --git a/docs/images/singletenant.png b/docs/images/singletenant.png deleted file mode 100644 index fd601b8943..0000000000 Binary files a/docs/images/singletenant.png and /dev/null differ diff --git a/docs/images/thousandeyes-provider_1.png b/docs/images/thousandeyes-provider_1.png new file mode 100644 index 0000000000..deccccf7e9 Binary files /dev/null and b/docs/images/thousandeyes-provider_1.png differ diff --git a/docs/images/thousandeyes-provider_2.png b/docs/images/thousandeyes-provider_2.png new file mode 100644 index 0000000000..cdee46afdc Binary files /dev/null and b/docs/images/thousandeyes-provider_2.png differ diff --git a/docs/images/thousandeyes-provider_3.png b/docs/images/thousandeyes-provider_3.png new file mode 100644 index 0000000000..ac46863028 Binary files /dev/null and b/docs/images/thousandeyes-provider_3.png differ diff --git a/docs/images/thousandeyes-provider_4.png b/docs/images/thousandeyes-provider_4.png new file mode 100644 index 0000000000..f8bde9a618 Binary files /dev/null and b/docs/images/thousandeyes-provider_4.png differ diff --git a/docs/images/thousandeyes-provider_5.png b/docs/images/thousandeyes-provider_5.png new file mode 100644 index 0000000000..7cee0bd67a Binary files /dev/null and b/docs/images/thousandeyes-provider_5.png differ diff --git 
a/docs/images/thousandeyes-provider_6.png b/docs/images/thousandeyes-provider_6.png new file mode 100644 index 0000000000..670b834e88 Binary files /dev/null and b/docs/images/thousandeyes-provider_6.png differ diff --git a/docs/images/thousandeyes-provider_7.png b/docs/images/thousandeyes-provider_7.png new file mode 100644 index 0000000000..aa2dd9fa45 Binary files /dev/null and b/docs/images/thousandeyes-provider_7.png differ diff --git a/docs/images/topology/topology_add_connection.png b/docs/images/topology/topology_add_connection.png new file mode 100644 index 0000000000..c998d7c18e Binary files /dev/null and b/docs/images/topology/topology_add_connection.png differ diff --git a/docs/images/topology/topology_add_node.png b/docs/images/topology/topology_add_node.png new file mode 100644 index 0000000000..df37f29cce Binary files /dev/null and b/docs/images/topology/topology_add_node.png differ diff --git a/docs/images/topology/topology_add_protocol.png b/docs/images/topology/topology_add_protocol.png new file mode 100644 index 0000000000..a4f65d278b Binary files /dev/null and b/docs/images/topology/topology_add_protocol.png differ diff --git a/docs/images/topology/topology_delete_dependency.png b/docs/images/topology/topology_delete_dependency.png new file mode 100644 index 0000000000..c472ebc2b9 Binary files /dev/null and b/docs/images/topology/topology_delete_dependency.png differ diff --git a/docs/images/topology/topology_delete_services.png b/docs/images/topology/topology_delete_services.png new file mode 100644 index 0000000000..3914f0a2a4 Binary files /dev/null and b/docs/images/topology/topology_delete_services.png differ diff --git a/docs/images/topology/topology_edit_dependency.png b/docs/images/topology/topology_edit_dependency.png new file mode 100644 index 0000000000..8a97f383e7 Binary files /dev/null and b/docs/images/topology/topology_edit_dependency.png differ diff --git a/docs/images/topology/topology_import_export.png 
b/docs/images/topology/topology_import_export.png new file mode 100644 index 0000000000..de836ae3ac Binary files /dev/null and b/docs/images/topology/topology_import_export.png differ diff --git a/docs/images/topology/topology_new_node.png b/docs/images/topology/topology_new_node.png new file mode 100644 index 0000000000..cc3570a679 Binary files /dev/null and b/docs/images/topology/topology_new_node.png differ diff --git a/docs/images/topology/topology_protocol_added.png b/docs/images/topology/topology_protocol_added.png new file mode 100644 index 0000000000..b93008324b Binary files /dev/null and b/docs/images/topology/topology_protocol_added.png differ diff --git a/docs/images/topology/topology_sidebar_add.png b/docs/images/topology/topology_sidebar_add.png new file mode 100644 index 0000000000..b5974955aa Binary files /dev/null and b/docs/images/topology/topology_sidebar_add.png differ diff --git a/docs/images/topology/topology_update_service.png b/docs/images/topology/topology_update_service.png new file mode 100644 index 0000000000..afdc67b78d Binary files /dev/null and b/docs/images/topology/topology_update_service.png differ diff --git a/docs/images/workflow.png b/docs/images/workflow.png new file mode 100644 index 0000000000..0015c893d0 Binary files /dev/null and b/docs/images/workflow.png differ diff --git a/docs/images/zabbix_role.png b/docs/images/zabbix_role.png index 18ad89bd7b..c77f3a0d2d 100644 Binary files a/docs/images/zabbix_role.png and b/docs/images/zabbix_role.png differ diff --git a/docs/images/zoom1.png b/docs/images/zoom1.png new file mode 100644 index 0000000000..e4c850cea2 Binary files /dev/null and b/docs/images/zoom1.png differ diff --git a/docs/images/zoom2.png b/docs/images/zoom2.png new file mode 100644 index 0000000000..24916a3c60 Binary files /dev/null and b/docs/images/zoom2.png differ diff --git a/docs/images/zoom3.png b/docs/images/zoom3.png new file mode 100644 index 0000000000..e9236948bd Binary files /dev/null and 
b/docs/images/zoom3.png differ diff --git a/docs/images/zoom4.png b/docs/images/zoom4.png new file mode 100644 index 0000000000..aed17177e7 Binary files /dev/null and b/docs/images/zoom4.png differ diff --git a/docs/images/zoom5.png b/docs/images/zoom5.png new file mode 100644 index 0000000000..427962bfee Binary files /dev/null and b/docs/images/zoom5.png differ diff --git a/docs/images/zoom6.png b/docs/images/zoom6.png new file mode 100644 index 0000000000..805f8206ce Binary files /dev/null and b/docs/images/zoom6.png differ diff --git a/docs/images/zoom_chat-provider1.png b/docs/images/zoom_chat-provider1.png new file mode 100644 index 0000000000..3fedb4f11e Binary files /dev/null and b/docs/images/zoom_chat-provider1.png differ diff --git a/docs/images/zoom_chat-provider10.png b/docs/images/zoom_chat-provider10.png new file mode 100644 index 0000000000..83a1af63e0 Binary files /dev/null and b/docs/images/zoom_chat-provider10.png differ diff --git a/docs/images/zoom_chat-provider2.png b/docs/images/zoom_chat-provider2.png new file mode 100644 index 0000000000..a315f5b573 Binary files /dev/null and b/docs/images/zoom_chat-provider2.png differ diff --git a/docs/images/zoom_chat-provider3.png b/docs/images/zoom_chat-provider3.png new file mode 100644 index 0000000000..44af6d78bb Binary files /dev/null and b/docs/images/zoom_chat-provider3.png differ diff --git a/docs/images/zoom_chat-provider4.png b/docs/images/zoom_chat-provider4.png new file mode 100644 index 0000000000..c073ae4f75 Binary files /dev/null and b/docs/images/zoom_chat-provider4.png differ diff --git a/docs/images/zoom_chat-provider5.png b/docs/images/zoom_chat-provider5.png new file mode 100644 index 0000000000..56ae5e5531 Binary files /dev/null and b/docs/images/zoom_chat-provider5.png differ diff --git a/docs/images/zoom_chat-provider6.png b/docs/images/zoom_chat-provider6.png new file mode 100644 index 0000000000..b217320a3c Binary files /dev/null and b/docs/images/zoom_chat-provider6.png differ 
diff --git a/docs/images/zoom_chat-provider7.png b/docs/images/zoom_chat-provider7.png new file mode 100644 index 0000000000..fb34a190b2 Binary files /dev/null and b/docs/images/zoom_chat-provider7.png differ diff --git a/docs/images/zoom_chat-provider8.png b/docs/images/zoom_chat-provider8.png new file mode 100644 index 0000000000..812fa24346 Binary files /dev/null and b/docs/images/zoom_chat-provider8.png differ diff --git a/docs/images/zoom_chat-provider9.png b/docs/images/zoom_chat-provider9.png new file mode 100644 index 0000000000..4c2aa7c5bf Binary files /dev/null and b/docs/images/zoom_chat-provider9.png differ diff --git a/docs/incidents/facets.mdx b/docs/incidents/facets.mdx new file mode 100644 index 0000000000..5af42231a0 --- /dev/null +++ b/docs/incidents/facets.mdx @@ -0,0 +1,36 @@ +Faceted search is a powerful mechanism for enhancing search functionality, allowing users to filter and refine search results dynamically using multiple dimensions or "facets." These facets are predefined categories or attributes of the data. In Keep, the Incidents page supports faceted search by incident attributes. + +### Predefined Incident Facets +These are predefined Incident facets that can be used to filter incidents: +- **Status**: Filter by Incident status +- **Severity**: Filter by Incident severity +- **Assignee**: Filter by Incident assignee +- **Source**: Filter by alert source +- **Service**: Filter by the service the Incident relates to + +### Custom Facets Creation +Keep also supports custom facets creation. Here is how to do this: +1. Click the "Add facet" button in the filtering panel. +2. Enter the Facet name. This is the name that will be displayed in the filter panel. +3. Enter the Facet property path the facet will filter by. +4. Click "Create". + + + + + +### Supported Properties to create Facets for +Incident supports facets by direct Incident fields and also by Alert's data linked to the Incident. 
Here is a list of properties you can create facets for: +- **name**: Incident name +- **summary**: Incident summary +- **creation_time**: Incident creation time +- **start_time**: Incident start time +- **end_time**: Incident end time +- **last_seen_time**: Incident last seen time +- **is_predicted**: Whether the Incident is predicted +- **is_candidate**: Whether the Incident is candidate +- **alerts_count**: Number of alerts associated with the Incident +- **merged_at**: When the Incident was merged +- **merged_by**: Who merged the Incident +- **hasLinkedIncident**: Whether the Incident has past incident linked +- **alert.***: Refers to alert properties in the Incident. Examples: alert.labels.monitor, alert.monitor, etc. diff --git a/docs/incidents/overview.mdx b/docs/incidents/overview.mdx new file mode 100644 index 0000000000..8d4687e38d --- /dev/null +++ b/docs/incidents/overview.mdx @@ -0,0 +1,109 @@ +--- +title: "Overview" +--- + +Keep's incident management system provides a comprehensive solution for handling, tracking, and resolving operational incidents. This system helps teams effectively manage incidents from detection through resolution, ensuring minimal downtime and efficient collaboration. + + + + + + +### (1) Incident Severity +Displays the severity of the incident, helping teams prioritize and focus on the most critical issues. + +### (2) Incident Name +The unique name or identifier of the incident for easy reference and tracking. + +### (3) Incident Summary (+ AI Summary) +A brief overview of the incident, optionally enhanced with AI-generated summaries to provide deeper insights. + +### (4) Link Similar Incidents +Connects related incidents for better visibility into recurring or interconnected issues. + +### (5) Involved Services +Lists the services affected by the incident, allowing teams to understand the scope of the impact. + +### (6) Affected Environments +Specifies the environments (e.g., production, staging) impacted by the incident. 
+ +### (7) Run Workflow +Quickly initiate workflows to address the incident, such as creating tickets, notifying teams, or executing remediation steps. + + + + + + +### (8) Edit Incident +Allows modification of incident details, such as severity, name, or involved services, to keep information up-to-date. + + + + + +### (9) Incident Status +Indicates the current status of the incident (e.g., open, resolved, acknowledged). + +### (10) Incident Last Seen At +Records the most recent timestamp when the incident was observed, providing context for its activity. + +### (11) Incident Started At +Indicates when the incident was first detected, helping establish timelines for resolution. + +### (12) Incident Assignee +Displays the individual or team responsible for resolving the incident, promoting accountability. + +### (13) Incident Group By Value +Groups incidents based on a specific attribute, such as service, environment, or severity, for better organization. + +### (14) Incident Related Alerts +Lists all alerts linked to the incident, offering a complete view of its underlying causes. + +### (15) Incident Activity +Tracks all activities and updates related to the incident, enabling detailed audits and reviews. + + + + + +### (16) Incident Timeline +Provides a chronological view of the incident's lifecycle, including updates, actions, and status changes. + + + + + +### (17) Incident Topology +Visualizes the relationships between affected components, services, and infrastructure in a topology map. + + + + + +### (18) Incident Workflows +Lists workflows associated with the incident, showing actions taken or available options for resolution. + +### (19) Incident Chat with AI (Incident Copilot) +Engage with AI-powered chat for guidance, insights, or recommended actions related to the incident. + + + + + +### (20) Incident Alert List +Displays a detailed list of alerts contributing to the incident, with metadata for each alert. 
+ +### (21) Incident Alert Link +Provides quick access to the original monitoring tool for a specific alert. + +### (22) Incident Alert Status +Shows the current status of each alert, such as acknowledged, resolved, or firing. + +### (23) Incident Correlation Type +Indicates how the incident was correlated: manually, via AI, or by rule-based logic. + +### (24) Incident Alert Unlink +Enables unlinking specific alerts from the incident if they are found to be unrelated. + +--- diff --git a/docs/mint.json b/docs/mint.json index 51a2b654c9..cce7cb52f2 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -21,351 +21,315 @@ "url": "https://platform.keephq.dev/" } ], + "analytics": { + "posthog": { + "apiKey": "phc_mYqciA4RO5g48K6KnmZtftn5xQa5625Aao7vsVC0gJ9" + } + }, "anchors": [], "navigation": [ { "group": "Overview", "pages": [ "overview/introduction", - "overview/keyconcepts", + "overview/playground", "overview/usecases", - "overview/ruleengine", - "overview/presets", { - "group": "Enrichments", + "group": "Key Concepts", "pages": [ - "overview/enrichment/extraction", - "overview/enrichment/mapping" + "overview/glossary", + "overview/cel", + "overview/fingerprints", + "overview/alertseverityandstatus", + "overview/howdoeskeepgetmyalerts", + "overview/comparisons" ] }, - "overview/examples", - "overview/comparison" + "overview/support", + "overview/faq" ] }, { - "group": "Development", + "group": "AIOps", + "pages": [ + { + "group": "AI", + "pages": [ + "overview/ai-incident-assistant", + "overview/ai-workflow-assistant", + "overview/ai-semi-automatic-correlation", + "overview/ai-in-workflows", + "overview/ai-correlation" + ] + }, + { + "group": "Non-AI Correlation", + "pages": [ + "overview/correlation-rules", + "overview/correlation-topology" + ] + }, + "overview/deduplication", + "overview/enrichment/extraction", + "overview/enrichment/mapping", + "overview/maintenance-windows", + "overview/servicetopology", + "overview/workflow-automation" + ] + }, + { + "group": 
"Alerts", "pages": [ - "development/getting-started", - "development/authentication", - "development/external-url" + "alerts/overview", + "alerts/table", + "alerts/actionmenu", + "alerts/sidebar", + "alerts/presets", + "alerts/sound" ] }, { - "group": "Deployment", + "group": "Incidents", + "pages": ["incidents/overview", "incidents/facets"] + }, + { + "group": "Workflow Automation", "pages": [ - "deployment/authentication", - "deployment/secret-manager", - "deployment/docker", - "deployment/kubernetes", - "deployment/openshift", - "deployment/ecs" + "workflows/overview", + { + "group": "Syntax", + "pages": [ + "workflows/syntax/triggers", + "workflows/syntax/permissions", + "workflows/syntax/steps-and-actions", + "workflows/syntax/conditions", + "workflows/syntax/functions", + "workflows/syntax/context", + "workflows/syntax/providers", + "workflows/syntax/foreach", + "workflows/syntax/enrichment" + ] + }, + { + "group": "Examples", + "pages": [ + "workflows/examples/autosupress", + "workflows/examples/buisnesshours", + "workflows/examples/create-servicenow-tickets", + "workflows/examples/highsev", + "workflows/examples/update-servicenow-tickets" + ] + } ] }, { - "group": "Platform", + "group": "Alert Evaluation Engine", "pages": [ - "platform/overview", - "platform/providers", - "platform/alerts", - "platform/alertseverityandstatus", - "platform/workflows", - "platform/workflowbuilder", - "platform/settings" + "alertevaluation/overview", + { + "group": "Examples", + "pages": [ + "alertevaluation/examples/victoriametricssingle", + "alertevaluation/examples/victoriametricsmulti" + ] + } ] }, { "group": "Providers", "pages": [ "providers/overview", - "providers/fingerprints", - "providers/adding-a-new-provider", + "providers/linked-providers", + "providers/provider-methods", { "group": "Supported Providers", "pages": [ - "providers/documentation/appdynamics-provider", + "providers/documentation/airflow-provider", "providers/documentation/aks-provider", + 
"providers/documentation/amazonsqs-provider", + "providers/documentation/anthropic-provider", + "providers/documentation/appdynamics-provider", + "providers/documentation/asana-provider", + "providers/documentation/s3-provider", + "providers/documentation/argocd-provider", + "providers/documentation/auth0-provider", "providers/documentation/axiom-provider", "providers/documentation/azuremonitoring-provider", + "providers/documentation/bash-provider", + "providers/documentation/bigquery-provider", "providers/documentation/centreon-provider", + "providers/documentation/checkmk-provider", + "providers/documentation/checkly-provider", + "providers/documentation/cilium-provider", + "providers/documentation/clickhouse-provider", "providers/documentation/cloudwatch-provider", "providers/documentation/console-provider", + "providers/documentation/coralogix-provider", + "providers/documentation/dash0-provider", + "providers/documentation/databend-provider", "providers/documentation/datadog-provider", + "providers/documentation/deepseek-provider", "providers/documentation/discord-provider", + "providers/documentation/dynatrace-provider", + "providers/documentation/eks-provider", "providers/documentation/elastic-provider", + "providers/documentation/flashduty-provider", + "providers/documentation/fluxcd-provider", "providers/documentation/gcpmonitoring-provider", + "providers/documentation/gemini-provider", + "providers/documentation/github-provider", + "providers/documentation/github_workflows_provider", "providers/documentation/gitlab-provider", + "providers/documentation/gitlabpipelines-provider", + "providers/documentation/gke-provider", "providers/documentation/google_chat-provider", + "providers/documentation/grafana-provider", "providers/documentation/grafana_incident-provider", + "providers/documentation/grafana_loki-provider", "providers/documentation/grafana_oncall-provider", - "providers/documentation/grafana-provider", + "providers/documentation/graylog-provider", 
+ "providers/documentation/grok-provider", "providers/documentation/http-provider", + "providers/documentation/icinga2-provider", "providers/documentation/ilert-provider", "providers/documentation/incidentio-provider", + "providers/documentation/incidentmanager-provider", + "providers/documentation/jira-on-prem-provider", "providers/documentation/jira-provider", + "providers/documentation/kafka-provider", + "providers/documentation/keep-provider", "providers/documentation/kibana-provider", "providers/documentation/kubernetes-provider", + "providers/documentation/libre_nms-provider", + "providers/documentation/linear_provider", "providers/documentation/linearb-provider", - "providers/documentation/mailchimp-provider", + "providers/documentation/litellm-provider", + "providers/documentation/llamacpp-provider", + "providers/documentation/mailgun-provider", + "providers/documentation/mattermost-provider", + "providers/documentation/microsoft-planner-provider", "providers/documentation/mock-provider", + "providers/documentation/monday-provider", + "providers/documentation/mongodb-provider", "providers/documentation/mysql-provider", + "providers/documentation/netbox-provider", "providers/documentation/netdata-provider", "providers/documentation/new-relic-provider", "providers/documentation/ntfy-provider", + "providers/documentation/ollama-provider", + "providers/documentation/openai-provider", "providers/documentation/openobserve-provider", + "providers/documentation/opensearchserverless-provider", "providers/documentation/openshift-provider", "providers/documentation/opsgenie-provider", "providers/documentation/pagerduty-provider", "providers/documentation/pagertree-provider", + "providers/documentation/parseable-provider", "providers/documentation/pingdom-provider", + "providers/documentation/posthog-provider", + "providers/documentation/planner-provider", "providers/documentation/postgresql-provider", + "providers/documentation/prometheus-provider", 
"providers/documentation/pushover-provider", + "providers/documentation/python-provider", + "providers/documentation/quickchart-provider", "providers/documentation/redmine-provider", "providers/documentation/resend-provider", "providers/documentation/rollbar-provider", "providers/documentation/sendgrid-provider", "providers/documentation/sentry-provider", + "providers/documentation/service-now-provider", "providers/documentation/signalfx-provider", "providers/documentation/signl4-provider", "providers/documentation/site24x7-provider", "providers/documentation/slack-provider", + "providers/documentation/smtp-provider", "providers/documentation/snowflake-provider", - "providers/documentation/statuscake-provider", "providers/documentation/splunk-provider", "providers/documentation/squadcast-provider", "providers/documentation/ssh-provider", + "providers/documentation/statuscake-provider", + "providers/documentation/sumologic-provider", "providers/documentation/teams-provider", "providers/documentation/telegram-provider", + "providers/documentation/template", + "providers/documentation/thousandeyes-provider", "providers/documentation/trello-provider", "providers/documentation/twilio-provider", "providers/documentation/uptimekuma-provider", + "providers/documentation/victorialogs-provider", "providers/documentation/victoriametrics-provider", + "providers/documentation/vllm-provider", + "providers/documentation/wazuh-provider", + "providers/documentation/webhook-provider", "providers/documentation/websocket-provider", - "providers/documentation/incidentmanager-provider", + "providers/documentation/youtrack-provider", "providers/documentation/zabbix-provider", - "providers/documentation/zenduty-provider" + "providers/documentation/zenduty-provider", + "providers/documentation/zoom-provider", + "providers/documentation/zoom_chat-provider" ] - } + }, + "providers/adding-a-new-provider" ] }, { - "group": "Workflows", + "group": "Deployment", "pages": [ - 
"workflows/overview", - { - "group": "Syntax", - "pages": [ - "workflows/syntax/basic-syntax", - "workflows/syntax/foreach-syntax", - "workflows/syntax/context-syntax" - ] - }, - { - "group": "Providers", - "pages": [ - "workflows/providers/getting-started", - "workflows/providers/what-is-a-provider" - ] - }, + "deployment/configuration", + "deployment/monitoring", { - "group": "Conditions", + "group": "Authentication", "pages": [ - "workflows/conditions/what-is-a-condition", - "workflows/conditions/threshold", - "workflows/conditions/assert", - "workflows/conditions/stddev" + "deployment/authentication/overview", + "deployment/authentication/no-auth", + "deployment/authentication/db-auth", + "deployment/authentication/auth0-auth", + "deployment/authentication/azuread-auth", + "deployment/authentication/keycloak-auth", + "deployment/authentication/oauth2proxy-auth", + "deployment/authentication/oauth2-proxy-gitlab", + "deployment/authentication/okta-auth", + "deployment/authentication/onelogin-auth" ] }, { - "group": "Functions", + "group": "Provision", "pages": [ - "workflows/functions/what-is-a-function", - "workflows/functions/all", - "workflows/functions/add-time-to-date", - "workflows/functions/diff", - "workflows/functions/len", - "workflows/functions/split", - "workflows/functions/first", - "workflows/functions/utcnow", - "workflows/functions/to-utc", - "workflows/functions/datetime-compare", - "workflows/functions/encode" + "deployment/provision/overview", + "deployment/provision/provider", + "deployment/provision/workflow", + "deployment/provision/dashboard" ] }, + "deployment/secret-store", { - "group": "Throttles", + "group": "Deploy On", "pages": [ - "workflows/throttles/what-is-a-throttle", - "workflows/throttles/one-until-resolved" + "deployment/docker", + { + "group": "Kubernetes", + "pages": [ + "deployment/kubernetes/overview", + "deployment/kubernetes/installation", + "deployment/kubernetes/architecture", + "deployment/kubernetes/openshift" + ] + 
}, + "deployment/openshift", + "deployment/ecs" ] }, { - "group": "Examples", - "pages": [ - "workflows/examples/multi-step-alert", - "workflows/examples/reusable-action-alert" - ] + "group": "Local LLM", + "pages": ["deployment/local-llm/keep-with-litellm"] }, - "workflows/state" + "deployment/stress-testing" ] }, { - "group": "Keep API", - "pages": [ - { - "group": "Providers", - "pages": [ - "api-ref/providers/get-providers", - "api-ref/providers/get-alerts-configuration", - "api-ref/providers/get-logs", - "api-ref/providers/get-alerts-schema", - "api-ref/providers/add-alert", - "api-ref/providers/test-provider", - "api-ref/providers/delete-provider", - "api-ref/providers/install-provider", - "api-ref/providers/install-provider-webhook", - "api-ref/providers/get-webhook-settings", - "api-ref/providers/get-installed-providers", - "api-ref/providers/install-provider-oauth2", - "api-ref/providers/invoke-provider-method", - - "api-ref/providers/update-provider", - "api-ref/providers/validate-provider-scopes" - ] - }, - { - "group": "Healthcheck", - "pages": ["api-ref/healthcheck/healthcheck"] - }, - { - "group": "Alerts", - "pages": [ - "api-ref/alerts/get-alerts", - "api-ref/alerts/receive-event", - "api-ref/alerts/get-alert", - "api-ref/alerts/assign-alert", - "api-ref/alerts/delete-alert", - "api-ref/alerts/enrich-alert", - "api-ref/alerts/get-alert-history", - "api-ref/alerts/get-all-alerts", - "api-ref/alerts/receive-event", - "api-ref/alerts/receive-generic-event", - "api-ref/alerts/search-alerts" - ] - }, - { - "group": "Enrichment", - "pages": [ - "api-ref/enrichment/create-extraction-rule", - "api-ref/enrichment/create-rule", - "api-ref/enrichment/delete-extraction-rule", - "api-ref/enrichment/delete-rule", - "api-ref/enrichment/get-extraction-rules", - "api-ref/enrichment/get-rules", - "api-ref/enrichment/update-extraction-rule", - "api-ref/enrichment/update-rule" - ] - }, - { - "group": "Groups", - "pages": [ - "api-ref/groups/get-groups" - ] - }, - { - 
"group": "Whoami", - "pages": [ - "api-ref/whoami/get-tenant-id" - ] - }, - { - "group": "Mapping", - "pages": [ - "api-ref/mapping/create-mapping", - "api-ref/mapping/get-mappings" - ] - }, - { - "group": "Preset", - "pages": [ - "api-ref/preset/create-preset", - "api-ref/preset/delete-preset", - "api-ref/preset/get-presets", - "api-ref/preset/update-preset" - ] - }, - { - "group": "Pusher", - "pages": [ - "api-ref/pusher/pusher-authentication" - ] - }, - { - "group": "Rules", - "pages": [ - "api-ref/rules/create-rule", - "api-ref/rules/delete-rule", - "api-ref/rules/get-rules", - "api-ref/rules/update-rule" - ] - }, - { - "group": "Webhook settings", - "pages": [ - "api-ref/settings/webhook-settings", - "api-ref/settings/create-key", - "api-ref/settings/create-user", - "api-ref/settings/delete-api-key", - "api-ref/settings/delete-smtp-settings", - "api-ref/settings/delete-user", - "api-ref/settings/get-keys", - "api-ref/settings/get-smtp-settings", - "api-ref/settings/get-users", - "api-ref/settings/test-smtp-settings", - "api-ref/settings/update-api-key", - "api-ref/settings/update-smtp-settings" - ] - }, - { - "group": "Users", - "pages": [ - "api-ref/users/get-users", - "api-ref/users/create-user", - "api-ref/users/delete-user", - "api-ref/users/delete-user-by-email" - ] - }, - { - "group": "Status", - "pages": [ - "api-ref/status/status" - ] - }, - - { - "group": "Workflows", - "pages": [ - "api-ref/workflows/get-workflows", - "api-ref/workflows/create-workflow", - "api-ref/workflows/run-workflow", - "api-ref/workflows/get-workflow-by-id", - "api-ref/workflows/delete-workflow-by-id", - "api-ref/workflows/get-raw-workflow-by-id", - "api-ref/workflows/get-workflow-execution-status", - "api-ref/workflows/get-workflow-executions", - "api-ref/workflows/get-workflow-executions-by-alert-fingerprint", - "api-ref/workflows/update-workflow-by-id" - ] - }, - { - "group": "Actions", - "pages": [ - "api-ref/actions/get-actions", - "api-ref/actions/add-actions", - 
"api-ref/actions/update-action", - "api-ref/actions/delete-action" - ] - } - ] + "group": "Development", + "pages": ["development/getting-started", "development/external-url"] }, { "group": "Keep CLI", @@ -379,6 +343,7 @@ { "group": "keep alert", "pages": [ + "cli/commands/cli-alert", "cli/commands/alert-enrich", "cli/commands/alert-get", "cli/commands/alert-list" @@ -387,6 +352,7 @@ { "group": "keep provider", "pages": [ + "cli/commands/cli-provider", "cli/commands/provider-connect", "cli/commands/provider-delete", "cli/commands/provider-list" @@ -395,9 +361,11 @@ { "group": "keep workflow", "pages": [ + "cli/commands/cli-workflow", "cli/commands/workflow-apply", "cli/commands/workflow-list", "cli/commands/workflow-run", + "cli/commands/workflow-runs", { "group": "keep workflow runs", "pages": ["cli/commands/runs-logs", "cli/commands/runs-list"] @@ -412,7 +380,19 @@ "cli/commands/mappings-delete" ] }, + { + "group": "keep extractions", + "pages": [ + "cli/commands/extraction-create", + "cli/commands/extraction-delete", + "cli/commands/extractions-list" + ] + }, + "cli/commands/cli", "cli/commands/cli-api", + "cli/commands/cli-config-new", + "cli/commands/cli-config-show", + "cli/commands/cli-run", "cli/commands/cli-config", "cli/commands/cli-version", "cli/commands/cli-whoami" diff --git a/docs/openapi.json b/docs/openapi.json index 380a330025..8b0c758637 100644 --- a/docs/openapi.json +++ b/docs/openapi.json @@ -1 +1 @@ -{"openapi":"3.1.0","info":{"title":"Keep API","description":"Rest API powering https://platform.keephq.dev and friends 🏄‍♀️","version":"0.1.0"},"paths":{"/providers":{"get":{"tags":["providers"],"summary":"Get Providers","operationId":"get_providers_providers_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/export":{"get":{"tags":["providers"],"summary":"Get Installed Providers","description":"export 
all installed providers","operationId":"get_installed_providers_providers_export_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/{provider_type}/{provider_id}/configured-alerts":{"get":{"tags":["providers"],"summary":"Get Alerts Configuration","description":"Get alerts configuration from a provider","operationId":"get_alerts_configuration_providers__provider_type___provider_id__configured_alerts_get","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Type"},"name":"provider_type","in":"path"},{"required":true,"schema":{"type":"string","title":"Provider Id"},"name":"provider_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{},"type":"array","title":"Response Get Alerts Configuration Providers Provider Type Provider Id Configured Alerts Get"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/{provider_type}/{provider_id}/logs":{"get":{"tags":["providers"],"summary":"Get Logs","description":"Get logs from a provider","operationId":"get_logs_providers__provider_type___provider_id__logs_get","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Type"},"name":"provider_type","in":"path"},{"required":true,"schema":{"type":"string","title":"Provider Id"},"name":"provider_id","in":"path"},{"required":false,"schema":{"type":"integer","title":"Limit","default":5},"name":"limit","in":"query"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{},"type":"array","title":"Response Get Logs Providers Provider Type Provider Id Logs Get"}}}},"422":{"description":"Validation 
Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/{provider_type}/schema":{"get":{"tags":["providers"],"summary":"Get Alerts Schema","description":"Get the provider's API schema used to push alerts configuration","operationId":"get_alerts_schema_providers__provider_type__schema_get","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Type"},"name":"provider_type","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"type":"object","title":"Response Get Alerts Schema Providers Provider Type Schema Get"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/providers/{provider_type}/{provider_id}/alerts":{"post":{"tags":["providers"],"summary":"Add Alert","description":"Push new alerts to the provider","operationId":"add_alert_providers__provider_type___provider_id__alerts_post","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Type"},"name":"provider_type","in":"path"},{"required":true,"schema":{"type":"string","title":"Provider Id"},"name":"provider_id","in":"path"},{"required":false,"schema":{"type":"string","title":"Alert Id"},"name":"alert_id","in":"query"}],"requestBody":{"content":{"application/json":{"schema":{"type":"object","title":"Alert"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/test":{"post":{"tags":["providers"],"summary":"Test Provider","description":"Test a provider's alert 
retrieval","operationId":"test_provider_providers_test_post","requestBody":{"content":{"application/json":{"schema":{"type":"object","title":"Provider Info"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/{provider_type}/{provider_id}":{"delete":{"tags":["providers"],"summary":"Delete Provider","operationId":"delete_provider_providers__provider_type___provider_id__delete","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Type"},"name":"provider_type","in":"path"},{"required":true,"schema":{"type":"string","title":"Provider Id"},"name":"provider_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/{provider_id}/scopes":{"post":{"tags":["providers"],"summary":"Validate Provider Scopes","description":"Validate provider scopes","operationId":"validate_provider_scopes_providers__provider_id__scopes_post","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Id"},"name":"provider_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"additionalProperties":{"anyOf":[{"type":"boolean"},{"type":"string"}]},"type":"object","title":"Response Validate Provider Scopes Providers Provider Id Scopes Post"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API 
Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/{provider_id}":{"put":{"tags":["providers"],"summary":"Update Provider","description":"Update provider","operationId":"update_provider_providers__provider_id__put","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Id"},"name":"provider_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/install":{"post":{"tags":["providers"],"summary":"Install Provider","operationId":"install_provider_providers_install_post","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/install/oauth2/{provider_type}":{"post":{"tags":["providers"],"summary":"Install Provider Oauth2","operationId":"install_provider_oauth2_providers_install_oauth2__provider_type__post","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Type"},"name":"provider_type","in":"path"}],"requestBody":{"content":{"application/json":{"schema":{"type":"object","title":"Provider Info"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/{provider_id}/invoke/{method}":{"post":{"tags":["providers"],"summary":"Invoke Provider Method","description":"Invoke provider special 
method","operationId":"invoke_provider_method_providers__provider_id__invoke__method__post","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Id"},"name":"provider_id","in":"path"},{"required":true,"schema":{"type":"string","title":"Method"},"name":"method","in":"path"}],"requestBody":{"content":{"application/json":{"schema":{"type":"object","title":"Method Params"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/install/webhook/{provider_type}/{provider_id}":{"post":{"tags":["providers"],"summary":"Install Provider Webhook","operationId":"install_provider_webhook_providers_install_webhook__provider_type___provider_id__post","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Type"},"name":"provider_type","in":"path"},{"required":true,"schema":{"type":"string","title":"Provider Id"},"name":"provider_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/providers/{provider_type}/webhook":{"get":{"tags":["providers"],"summary":"Get Webhook Settings","operationId":"get_webhook_settings_providers__provider_type__webhook_get","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Type"},"name":"provider_type","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ProviderWebhookSettings"}}}},"422":{"description":"Validation 
Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/healthcheck":{"get":{"tags":["healthcheck"],"summary":"Healthcheck","description":"simple healthcheck endpoint","operationId":"healthcheck_healthcheck_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"type":"object","title":"Response Healthcheck Healthcheck Get"}}}}}}},"/alerts":{"get":{"tags":["alerts"],"summary":"Get All Alerts","description":"Get last alerts occurrence","operationId":"get_all_alerts_alerts_get","parameters":[{"required":false,"schema":{"type":"boolean","title":"Sync","default":false},"name":"sync","in":"query"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/AlertDto"},"type":"array","title":"Response Get All Alerts Alerts Get"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"delete":{"tags":["alerts"],"summary":"Delete Alert","description":"Delete alert by finerprint and last received time","operationId":"delete_alert_alerts_delete","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/DeleteRequestBody"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"additionalProperties":{"type":"string"},"type":"object","title":"Response Delete Alert Alerts Delete"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/alerts/{fingerprint}/history":{"get":{"tags":["alerts"],"summary":"Get Alert History","description":"Get 
alert history","operationId":"get_alert_history_alerts__fingerprint__history_get","parameters":[{"required":true,"schema":{"type":"string","title":"Fingerprint"},"name":"fingerprint","in":"path"},{"required":false,"schema":{"type":"string","title":"Provider Id"},"name":"provider_id","in":"query"},{"required":false,"schema":{"type":"string","title":"Provider Type"},"name":"provider_type","in":"query"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/AlertDto"},"type":"array","title":"Response Get Alert History Alerts Fingerprint History Get"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/alerts/{fingerprint}/assign/{last_received}":{"post":{"tags":["alerts"],"summary":"Assign Alert","description":"Assign alert to user","operationId":"assign_alert_alerts__fingerprint__assign__last_received__post","parameters":[{"required":true,"schema":{"type":"string","title":"Fingerprint"},"name":"fingerprint","in":"path"},{"required":true,"schema":{"type":"string","title":"Last Received"},"name":"last_received","in":"path"},{"required":false,"schema":{"type":"boolean","title":"Unassign","default":false},"name":"unassign","in":"query"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"additionalProperties":{"type":"string"},"type":"object","title":"Response Assign Alert Alerts Fingerprint Assign Last Received Post"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/alerts/event":{"post":{"tags":["alerts"],"summary":"Receive Generic Event","description":"Receive a generic alert 
event","operationId":"receive_generic_event_alerts_event_post","parameters":[{"required":false,"schema":{"type":"string","title":"Fingerprint"},"name":"fingerprint","in":"query"}],"requestBody":{"content":{"application/json":{"schema":{"anyOf":[{"$ref":"#/components/schemas/AlertDto"},{"items":{"$ref":"#/components/schemas/AlertDto"},"type":"array"},{"type":"object"}],"title":"Event"}}},"required":true},"responses":{"201":{"description":"Successful Response","content":{"application/json":{"schema":{"anyOf":[{"$ref":"#/components/schemas/AlertDto"},{"items":{"$ref":"#/components/schemas/AlertDto"},"type":"array"}],"title":"Response Receive Generic Event Alerts Event Post"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/alerts/event/{provider_type}":{"post":{"tags":["alerts"],"summary":"Receive Event","description":"Receive an alert event from a provider","operationId":"receive_event_alerts_event__provider_type__post","parameters":[{"required":true,"schema":{"type":"string","title":"Provider Type"},"name":"provider_type","in":"path"},{"required":false,"schema":{"type":"string","title":"Provider Id"},"name":"provider_id","in":"query"},{"required":false,"schema":{"type":"string","title":"Fingerprint"},"name":"fingerprint","in":"query"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"additionalProperties":{"type":"string"},"type":"object","title":"Response Receive Event Alerts Event Provider Type Post"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/alerts/{fingerprint}":{"get":{"tags":["alerts"],"summary":"Get Alert","description":"Get alert by 
fingerprint","operationId":"get_alert_alerts__fingerprint__get","parameters":[{"required":true,"schema":{"type":"string","title":"Fingerprint"},"name":"fingerprint","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/AlertDto"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/alerts/enrich":{"post":{"tags":["alerts"],"summary":"Enrich Alert","description":"Enrich an alert","operationId":"enrich_alert_alerts_enrich_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/EnrichAlertRequestBody"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"additionalProperties":{"type":"string"},"type":"object","title":"Response Enrich Alert Alerts Enrich Post"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/alerts/search":{"post":{"tags":["alerts"],"summary":"Search Alerts","description":"Search alerts","operationId":"search_alerts_alerts_search_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/SearchAlertsRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/AlertDto"},"type":"array","title":"Response Search Alerts Alerts Search Post"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/settings/webhook":{"get":{"tags":["settings"],"summary":"Webhook 
Settings","description":"Get details about the webhook endpoint (e.g. the API url and an API key)","operationId":"webhook_settings_settings_webhook_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WebhookSettings"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/settings/users":{"get":{"tags":["settings"],"summary":"Get Users","description":"Get all users","operationId":"get_users_settings_users_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/User"},"type":"array","title":"Response Get Users Settings Users Get"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"post":{"tags":["settings"],"summary":"Create User","description":"Create a user","operationId":"create_user_settings_users_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/keep__api__routes__settings__CreateUserRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/settings/users/{user_email}":{"delete":{"tags":["settings"],"summary":"Delete User","description":"Delete a user","operationId":"delete_user_settings_users__user_email__delete","parameters":[{"required":true,"schema":{"type":"string","title":"User Email"},"name":"user_email","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API 
Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/settings/smtp":{"get":{"tags":["settings"],"summary":"Get Smtp Settings","description":"Get SMTP settings","operationId":"get_smtp_settings_settings_smtp_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"post":{"tags":["settings"],"summary":"Update Smtp Settings","description":"Install or update SMTP settings","operationId":"update_smtp_settings_settings_smtp_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/SMTPSettings"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"delete":{"tags":["settings"],"summary":"Delete Smtp Settings","description":"Delete SMTP settings","operationId":"delete_smtp_settings_settings_smtp_delete","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/settings/smtp/test":{"post":{"tags":["settings"],"summary":"Test Smtp Settings","description":"Test SMTP settings","operationId":"test_smtp_settings_settings_smtp_test_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/SMTPSettings"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/settings/apikey":{"put":{"tags":["settings"],"summary":"Update 
Api Key","description":"Update API key secret","operationId":"update_api_key_settings_apikey_put","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"post":{"tags":["settings"],"summary":"Create Key","description":"Create API key","operationId":"create_key_settings_apikey_post","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/settings/apikeys":{"get":{"tags":["settings"],"summary":"Get Keys","description":"Get API keys","operationId":"get_keys_settings_apikeys_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/settings/apikey/{keyId}":{"delete":{"tags":["settings"],"summary":"Delete Api Key","description":"Delete API key","operationId":"delete_api_key_settings_apikey__keyId__delete","parameters":[{"required":true,"schema":{"type":"string","title":"Keyid"},"name":"keyId","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/workflows":{"get":{"tags":["workflows","alerts"],"summary":"Get Workflows","description":"Get workflows","operationId":"get_workflows_workflows_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/WorkflowDTO"},"type":"array","title":"Response Get Workflows Workflows Get"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"post":{"tags":["workflows","alerts"],"summary":"Create 
Workflow","description":"Create or update a workflow","operationId":"create_workflow_workflows_post","requestBody":{"content":{"multipart/form-data":{"schema":{"$ref":"#/components/schemas/Body_create_workflow_workflows_post"}}}},"responses":{"201":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkflowCreateOrUpdateDTO"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/workflows/export":{"get":{"tags":["workflows","alerts"],"summary":"Export Workflows","description":"export all workflow Yamls","operationId":"export_workflows_workflows_export_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"type":"string"},"type":"array","title":"Response Export Workflows Workflows Export Get"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/workflows/{workflow_id}/run":{"post":{"tags":["workflows","alerts"],"summary":"Run Workflow","description":"Run a workflow","operationId":"run_workflow_workflows__workflow_id__run_post","parameters":[{"required":true,"schema":{"type":"string","title":"Workflow Id"},"name":"workflow_id","in":"path"}],"requestBody":{"content":{"application/json":{"schema":{"type":"object","title":"Body"}}}},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"type":"object","title":"Response Run Workflow Workflows Workflow Id Run Post"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/workflows/test":{"post":{"tags":["workflows","alerts"],"summary":"Run Workflow From Definition","description":"Test run a workflow from a 
definition","operationId":"run_workflow_from_definition_workflows_test_post","requestBody":{"content":{"multipart/form-data":{"schema":{"$ref":"#/components/schemas/Body_run_workflow_from_definition_workflows_test_post"}}}},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"type":"object","title":"Response Run Workflow From Definition Workflows Test Post"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/workflows/{workflow_id}":{"get":{"tags":["workflows","alerts"],"summary":"Get Workflow By Id","description":"Get workflow executions by ID","operationId":"get_workflow_by_id_workflows__workflow_id__get","parameters":[{"required":true,"schema":{"type":"string","title":"Workflow Id"},"name":"workflow_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/WorkflowExecutionDTO"},"type":"array","title":"Response Get Workflow By Id Workflows Workflow Id Get"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"put":{"tags":["workflows","alerts"],"summary":"Update Workflow By Id","description":"Update a workflow","operationId":"update_workflow_by_id_workflows__workflow_id__put","parameters":[{"required":true,"schema":{"type":"string","title":"Workflow Id"},"name":"workflow_id","in":"path"}],"responses":{"201":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkflowCreateOrUpdateDTO"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API 
Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"delete":{"tags":["workflows","alerts"],"summary":"Delete Workflow By Id","description":"Delete workflow","operationId":"delete_workflow_by_id_workflows__workflow_id__delete","parameters":[{"required":true,"schema":{"type":"string","title":"Workflow Id"},"name":"workflow_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/workflows/{workflow_id}/raw":{"get":{"tags":["workflows","alerts"],"summary":"Get Raw Workflow By Id","description":"Get raw workflow by ID","operationId":"get_raw_workflow_by_id_workflows__workflow_id__raw_get","parameters":[{"required":true,"schema":{"type":"string","title":"Workflow Id"},"name":"workflow_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"type":"string","title":"Response Get Raw Workflow By Id Workflows Workflow Id Raw Get"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/workflows/executions":{"get":{"tags":["workflows","alerts"],"summary":"Get Workflow Executions By Alert Fingerprint","description":"Get workflow executions by alert fingerprint","operationId":"get_workflow_executions_by_alert_fingerprint_workflows_executions_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/WorkflowToAlertExecutionDTO"},"type":"array","title":"Response Get Workflow Executions By Alert Fingerprint Workflows Executions Get"}}}}},"security":[{"API 
Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/workflows/{workflow_id}/runs/{workflow_execution_id}":{"get":{"tags":["workflows","alerts"],"summary":"Get Workflow Execution Status","description":"Get a workflow execution status","operationId":"get_workflow_execution_status_workflows__workflow_id__runs__workflow_execution_id__get","parameters":[{"required":true,"schema":{"type":"string","title":"Workflow Execution Id"},"name":"workflow_execution_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkflowExecutionDTO"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/workflows/executions/list":{"get":{"tags":["workflows","alerts"],"summary":"Get Workflow Executions","description":"List last workflow executions","operationId":"get_workflow_executions_workflows_executions_list_get","parameters":[{"description":"Workflow execution ID","required":false,"schema":{"type":"string","title":"Workflow Execution Id","description":"Workflow execution ID"},"name":"workflow_execution_id","in":"query"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/WorkflowExecutionDTO"},"type":"array","title":"Response Get Workflow Executions Workflows Executions List Get"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/whoami":{"get":{"tags":["whoami"],"summary":"Get Tenant Id","description":"Get tenant id","operationId":"get_tenant_id_whoami_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"type":"object","title":"Response 
Get Tenant Id Whoami Get"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/pusher/auth":{"post":{"tags":["pusher"],"summary":"Pusher Authentication","description":"Authenticate a user to a private channel\n\nArgs:\n request (Request): The request object\n tenant_id (str, optional): The tenant ID. Defaults to Depends(verify_bearer_token).\n pusher_client (Pusher, optional): Pusher client. Defaults to Depends(get_pusher_client).\n\nRaises:\n HTTPException: 403 if the user is not allowed to access the channel.\n\nReturns:\n dict: The authentication response.","operationId":"pusher_authentication_pusher_auth_post","requestBody":{"content":{"application/x-www-form-urlencoded":{"schema":{"$ref":"#/components/schemas/Body_pusher_authentication_pusher_auth_post"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"type":"object","title":"Response Pusher Authentication Pusher Auth Post"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/status":{"get":{"tags":["status"],"summary":"Status","description":"simple status endpoint","operationId":"status_status_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"type":"object","title":"Response Status Status Get"}}}}}}},"/rules":{"get":{"tags":["rules"],"summary":"Get Rules","description":"Get Rules","operationId":"get_rules_rules_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"post":{"tags":["rules"],"summary":"Create Rule","description":"Create 
Rule","operationId":"create_rule_rules_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/RuleCreateDto"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/rules/{rule_id}":{"put":{"tags":["rules"],"summary":"Update Rule","description":"Update Rule","operationId":"update_rule_rules__rule_id__put","parameters":[{"required":true,"schema":{"type":"string","title":"Rule Id"},"name":"rule_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"delete":{"tags":["rules"],"summary":"Delete Rule","description":"Delete Rule","operationId":"delete_rule_rules__rule_id__delete","parameters":[{"required":true,"schema":{"type":"string","title":"Rule Id"},"name":"rule_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/preset":{"get":{"tags":["preset"],"summary":"Get Presets","description":"Get all presets for tenant","operationId":"get_presets_preset_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/PresetDto"},"type":"array","title":"Response Get Presets Preset Get"}}}}},"security":[{"API 
Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"post":{"tags":["preset"],"summary":"Create Preset","description":"Create a preset for tenant","operationId":"create_preset_preset_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateOrUpdatePresetDto"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PresetDto"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/preset/{uuid}":{"put":{"tags":["preset"],"summary":"Update Preset","description":"Update a preset for tenant","operationId":"update_preset_preset__uuid__put","parameters":[{"required":true,"schema":{"type":"string","title":"Uuid"},"name":"uuid","in":"path"}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateOrUpdatePresetDto"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PresetDto"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"delete":{"tags":["preset"],"summary":"Delete Preset","description":"Delete a preset for tenant","operationId":"delete_preset_preset__uuid__delete","parameters":[{"required":true,"schema":{"type":"string","title":"Uuid"},"name":"uuid","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API 
Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/groups/":{"get":{"tags":["groups"],"summary":"Get Groups","description":"Get groups","operationId":"get_groups_groups__get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"type":"object"},"type":"array","title":"Response Get Groups Groups Get"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/users":{"get":{"tags":["users"],"summary":"Get Users","description":"Get all users","operationId":"get_users_users_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/User"},"type":"array","title":"Response Get Users Users Get"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"post":{"tags":["users"],"summary":"Create User","description":"Create a user","operationId":"create_user_users_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/keep__api__routes__users__CreateUserRequest"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/users/{user_email}":{"delete":{"tags":["users"],"summary":"Delete User","description":"Delete a user","operationId":"delete_user_users__user_email__delete","parameters":[{"required":true,"schema":{"type":"string","title":"User Email"},"name":"user_email","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API 
Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/mapping":{"get":{"tags":["enrichment","mapping"],"summary":"Get Rules","description":"Get all mapping rules","operationId":"get_rules_mapping_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/MappingRuleDtoOut"},"type":"array","title":"Response Get Rules Mapping Get"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"put":{"tags":["enrichment","mapping"],"summary":"Update Rule","description":"Update an existing rule","operationId":"update_rule_mapping_put","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/MappingRuleDtoUpdate"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/MappingRuleDtoOut"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"post":{"tags":["enrichment","mapping"],"summary":"Create Rule","description":"Create a new mapping rule","operationId":"create_rule_mapping_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/MappingRuleDtoIn"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/MappingRule"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/mapping/{rule_id}":{"delete":{"tags":["enrichment","mapping"],"summary":"Delete Rule","description":"Delete a mapping rule","operationId":"delete_rule_mapping__rule_id__delete","parameters":[{"required":true,"schema":{"type":"integer","title":"Rule 
Id"},"name":"rule_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/extraction":{"get":{"tags":["enrichment","extraction"],"summary":"Get Extraction Rules","description":"Get all extraction rules","operationId":"get_extraction_rules_extraction_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"$ref":"#/components/schemas/ExtractionRuleDtoOut"},"type":"array","title":"Response Get Extraction Rules Extraction Get"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"post":{"tags":["enrichment","extraction"],"summary":"Create Extraction Rule","description":"Create a new extraction rule","operationId":"create_extraction_rule_extraction_post","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ExtractionRuleDtoBase"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ExtractionRuleDtoOut"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}},"/extraction/{rule_id}":{"put":{"tags":["enrichment","extraction"],"summary":"Update Extraction Rule","description":"Update an existing extraction rule","operationId":"update_extraction_rule_extraction__rule_id__put","parameters":[{"required":true,"schema":{"type":"integer","title":"Rule 
Id"},"name":"rule_id","in":"path"}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ExtractionRuleDtoBase"}}},"required":true},"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ExtractionRuleDtoOut"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]},"delete":{"tags":["enrichment","extraction"],"summary":"Delete Extraction Rule","description":"Delete an extraction rule","operationId":"delete_extraction_rule_extraction__rule_id__delete","parameters":[{"required":true,"schema":{"type":"integer","title":"Rule Id"},"name":"rule_id","in":"path"}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}},"security":[{"API 
Key":[]},{"HTTPBasic":[]},{"OAuth2PasswordBearer":[]}]}}},"components":{"schemas":{"AlertDto":{"properties":{"id":{"type":"string","title":"Id"},"name":{"type":"string","title":"Name"},"status":{"$ref":"#/components/schemas/AlertStatus"},"severity":{"$ref":"#/components/schemas/AlertSeverity"},"lastReceived":{"type":"string","title":"Lastreceived"},"environment":{"type":"string","title":"Environment","default":"undefined"},"isDuplicate":{"type":"boolean","title":"Isduplicate"},"duplicateReason":{"type":"string","title":"Duplicatereason"},"service":{"type":"string","title":"Service"},"source":{"items":{"type":"string"},"type":"array","title":"Source","default":[]},"apiKeyRef":{"type":"string","title":"Apikeyref"},"message":{"type":"string","title":"Message"},"description":{"type":"string","title":"Description"},"pushed":{"type":"boolean","title":"Pushed","default":false},"event_id":{"type":"string","title":"Event Id"},"url":{"type":"string","maxLength":65536,"minLength":1,"format":"uri","title":"Url"},"labels":{"type":"object","title":"Labels","default":{}},"fingerprint":{"type":"string","title":"Fingerprint"},"deleted":{"type":"boolean","title":"Deleted","default":false},"dismissUntil":{"type":"string","title":"Dismissuntil"},"dismissed":{"type":"boolean","title":"Dismissed","default":false},"assignee":{"type":"string","title":"Assignee"},"providerId":{"type":"string","title":"Providerid"},"group":{"type":"boolean","title":"Group","default":false},"note":{"type":"string","title":"Note"},"startedAt":{"type":"string","title":"Startedat"},"isNoisy":{"type":"boolean","title":"Isnoisy","default":false}},"type":"object","required":["id","name","status","severity","lastReceived"],"title":"AlertDto","examples":[{"id":"1234","name":"Alert name","status":"firing","lastReceived":"2021-01-01T00:00:00.000Z","environment":"production","isDuplicate":false,"service":"backend","source":["keep"],"message":"Keep: Alert message","description":"Keep: Alert 
description","severity":"critical","pushed":true,"event_id":"1234","url":"https://www.keephq.dev?alertId=1234","labels":{"key":"value"},"ticket_url":"https://www.keephq.dev?enrichedTicketId=456","fingerprint":"1234"}]},"AlertSeverity":{"enum":["critical","high","warning","info","low"],"title":"AlertSeverity","description":"An enumeration."},"AlertStatus":{"enum":["firing","resolved","acknowledged","suppressed","pending"],"title":"AlertStatus","description":"An enumeration."},"Body_create_workflow_workflows_post":{"properties":{"file":{"type":"string","format":"binary","title":"File"}},"type":"object","title":"Body_create_workflow_workflows_post"},"Body_pusher_authentication_pusher_auth_post":{"properties":{"channel_name":{"title":"Channel Name"},"socket_id":{"title":"Socket Id"}},"type":"object","required":["channel_name","socket_id"],"title":"Body_pusher_authentication_pusher_auth_post"},"Body_run_workflow_from_definition_workflows_test_post":{"properties":{"file":{"type":"string","format":"binary","title":"File"}},"type":"object","title":"Body_run_workflow_from_definition_workflows_test_post"},"CreateOrUpdatePresetDto":{"properties":{"name":{"type":"string","title":"Name"},"options":{"items":{"$ref":"#/components/schemas/PresetOption"},"type":"array","title":"Options"},"is_private":{"type":"boolean","title":"Is Private","default":false},"is_noisy":{"type":"boolean","title":"Is 
Noisy","default":false}},"type":"object","required":["options"],"title":"CreateOrUpdatePresetDto"},"DeleteRequestBody":{"properties":{"fingerprint":{"type":"string","title":"Fingerprint"},"lastReceived":{"type":"string","title":"Lastreceived"},"restore":{"type":"boolean","title":"Restore","default":false}},"type":"object","required":["fingerprint","lastReceived"],"title":"DeleteRequestBody"},"EnrichAlertRequestBody":{"properties":{"enrichments":{"additionalProperties":{"type":"string"},"type":"object","title":"Enrichments"},"fingerprint":{"type":"string","title":"Fingerprint"}},"type":"object","required":["enrichments","fingerprint"],"title":"EnrichAlertRequestBody"},"ExtractionRuleDtoBase":{"properties":{"name":{"type":"string","title":"Name"},"description":{"type":"string","title":"Description"},"priority":{"type":"integer","title":"Priority","default":0},"attribute":{"type":"string","title":"Attribute"},"condition":{"type":"string","title":"Condition"},"disabled":{"type":"boolean","title":"Disabled","default":false},"regex":{"type":"string","title":"Regex"},"pre":{"type":"boolean","title":"Pre","default":false}},"type":"object","required":["name","regex"],"title":"ExtractionRuleDtoBase"},"ExtractionRuleDtoOut":{"properties":{"name":{"type":"string","title":"Name"},"description":{"type":"string","title":"Description"},"priority":{"type":"integer","title":"Priority","default":0},"attribute":{"type":"string","title":"Attribute"},"condition":{"type":"string","title":"Condition"},"disabled":{"type":"boolean","title":"Disabled","default":false},"regex":{"type":"string","title":"Regex"},"pre":{"type":"boolean","title":"Pre","default":false},"id":{"type":"integer","title":"Id"},"created_by":{"type":"string","title":"Created By"},"created_at":{"type":"string","format":"date-time","title":"Created At"},"updated_by":{"type":"string","title":"Updated By"},"updated_at":{"type":"string","format":"date-time","title":"Updated 
At"}},"type":"object","required":["name","regex","id","created_at"],"title":"ExtractionRuleDtoOut"},"HTTPValidationError":{"properties":{"detail":{"items":{"$ref":"#/components/schemas/ValidationError"},"type":"array","title":"Detail"}},"type":"object","title":"HTTPValidationError"},"MappingRule":{"properties":{"id":{"type":"integer","title":"Id"},"tenant_id":{"type":"string","title":"Tenant Id"},"priority":{"type":"integer","title":"Priority","default":0},"name":{"type":"string","maxLength":255,"title":"Name"},"description":{"type":"string","maxLength":2048,"title":"Description"},"file_name":{"type":"string","maxLength":255,"title":"File Name"},"created_by":{"type":"string","maxLength":255,"title":"Created By"},"created_at":{"type":"string","format":"date-time","title":"Created At"},"disabled":{"type":"boolean","title":"Disabled","default":false},"override":{"type":"boolean","title":"Override","default":true},"condition":{"type":"string","maxLength":2000,"title":"Condition"},"matchers":{"items":{"type":"string"},"type":"array","title":"Matchers"},"rows":{"items":{"type":"object"},"type":"array","title":"Rows"},"updated_by":{"type":"string","maxLength":255,"title":"Updated By"},"last_updated_at":{"type":"string","format":"date-time","title":"Last Updated At"}},"type":"object","required":["tenant_id","name","matchers","rows"],"title":"MappingRule"},"MappingRuleDtoIn":{"properties":{"name":{"type":"string","title":"Name"},"description":{"type":"string","title":"Description"},"file_name":{"type":"string","title":"File 
Name"},"priority":{"type":"integer","title":"Priority","default":0},"matchers":{"items":{"type":"string"},"type":"array","title":"Matchers"},"rows":{"items":{"type":"object"},"type":"array","title":"Rows"}},"type":"object","required":["name","matchers","rows"],"title":"MappingRuleDtoIn"},"MappingRuleDtoOut":{"properties":{"name":{"type":"string","title":"Name"},"description":{"type":"string","title":"Description"},"file_name":{"type":"string","title":"File Name"},"priority":{"type":"integer","title":"Priority","default":0},"matchers":{"items":{"type":"string"},"type":"array","title":"Matchers"},"id":{"type":"integer","title":"Id"},"created_by":{"type":"string","title":"Created By"},"created_at":{"type":"string","format":"date-time","title":"Created At"},"attributes":{"items":{"type":"string"},"type":"array","title":"Attributes","default":[]},"updated_by":{"type":"string","title":"Updated By"},"last_updated_at":{"type":"string","format":"date-time","title":"Last Updated At"}},"type":"object","required":["name","matchers","id","created_at"],"title":"MappingRuleDtoOut"},"MappingRuleDtoUpdate":{"properties":{"name":{"type":"string","title":"Name"},"description":{"type":"string","title":"Description"},"file_name":{"type":"string","title":"File Name"},"priority":{"type":"integer","title":"Priority","default":0},"matchers":{"items":{"type":"string"},"type":"array","title":"Matchers"},"id":{"type":"integer","title":"Id"},"rows":{"items":{"type":"object"},"type":"array","title":"Rows"}},"type":"object","required":["name","matchers","id"],"title":"MappingRuleDtoUpdate"},"PresetDto":{"properties":{"id":{"type":"string","format":"uuid","title":"Id"},"name":{"type":"string","title":"Name"},"options":{"items":{},"type":"array","title":"Options","default":[]},"created_by":{"type":"string","title":"Created By"},"is_private":{"type":"boolean","title":"Is Private","default":false},"is_noisy":{"type":"boolean","title":"Is 
Noisy","default":false},"should_do_noise_now":{"type":"boolean","title":"Should Do Noise Now","default":false},"alerts_count":{"type":"integer","title":"Alerts Count","default":0}},"type":"object","required":["id","name"],"title":"PresetDto"},"PresetOption":{"properties":{"label":{"type":"string","title":"Label"},"value":{"anyOf":[{"type":"string"},{"type":"object"}],"title":"Value"}},"type":"object","required":["label","value"],"title":"PresetOption"},"ProviderDTO":{"properties":{"type":{"type":"string","title":"Type"},"id":{"type":"string","title":"Id"},"name":{"type":"string","title":"Name"},"installed":{"type":"boolean","title":"Installed"}},"type":"object","required":["type","name","installed"],"title":"ProviderDTO"},"ProviderWebhookSettings":{"properties":{"webhookDescription":{"type":"string","title":"Webhookdescription"},"webhookTemplate":{"type":"string","title":"Webhooktemplate"},"webhookMarkdown":{"type":"string","title":"Webhookmarkdown"}},"type":"object","required":["webhookTemplate"],"title":"ProviderWebhookSettings"},"RuleCreateDto":{"properties":{"ruleName":{"type":"string","title":"Rulename"},"sqlQuery":{"type":"object","title":"Sqlquery"},"celQuery":{"type":"string","title":"Celquery"},"timeframeInSeconds":{"type":"integer","title":"Timeframeinseconds"},"groupingCriteria":{"items":{},"type":"array","title":"Groupingcriteria","default":[]},"groupDescription":{"type":"string","title":"Groupdescription"}},"type":"object","required":["ruleName","sqlQuery","celQuery","timeframeInSeconds"],"title":"RuleCreateDto"},"SMTPSettings":{"properties":{"host":{"type":"string","title":"Host"},"port":{"type":"integer","title":"Port"},"from_email":{"type":"string","title":"From Email"},"username":{"type":"string","title":"Username"},"password":{"type":"string","format":"password","title":"Password","writeOnly":true},"secure":{"type":"boolean","title":"Secure","default":true},"to_email":{"type":"string","title":"To 
Email","default":"keep@example.com"}},"type":"object","required":["host","port","from_email"],"title":"SMTPSettings","example":{"host":"smtp.example.com","port":587,"username":"user@example.com","password":"password","secure":true,"from_email":"noreply@example.com","to_email":""}},"SearchAlertsRequest":{"properties":{"query":{"type":"string","title":"Query"},"timeframe":{"type":"integer","title":"Timeframe"}},"type":"object","required":["query","timeframe"],"title":"SearchAlertsRequest"},"User":{"properties":{"email":{"type":"string","title":"Email"},"name":{"type":"string","title":"Name"},"role":{"type":"string","title":"Role"},"picture":{"type":"string","title":"Picture"},"created_at":{"type":"string","title":"Created At"},"last_login":{"type":"string","title":"Last Login"}},"type":"object","required":["email","name","role","created_at"],"title":"User"},"ValidationError":{"properties":{"loc":{"items":{"anyOf":[{"type":"string"},{"type":"integer"}]},"type":"array","title":"Location"},"msg":{"type":"string","title":"Message"},"type":{"type":"string","title":"Error Type"}},"type":"object","required":["loc","msg","type"],"title":"ValidationError"},"WebhookSettings":{"properties":{"webhookApi":{"type":"string","title":"Webhookapi"},"apiKey":{"type":"string","title":"Apikey"},"modelSchema":{"type":"object","title":"Modelschema"}},"type":"object","required":["webhookApi","apiKey","modelSchema"],"title":"WebhookSettings"},"WorkflowCreateOrUpdateDTO":{"properties":{"workflow_id":{"type":"string","title":"Workflow Id"},"status":{"type":"string","enum":["created","updated"],"title":"Status"},"revision":{"type":"integer","title":"Revision","default":1}},"type":"object","required":["workflow_id","status"],"title":"WorkflowCreateOrUpdateDTO"},"WorkflowDTO":{"properties":{"id":{"type":"string","title":"Id"},"name":{"type":"string","title":"Name","default":"Workflow file doesn't contain name"},"description":{"type":"string","title":"Description","default":"Workflow file doesn't 
contain description"},"created_by":{"type":"string","title":"Created By"},"creation_time":{"type":"string","format":"date-time","title":"Creation Time"},"triggers":{"items":{"type":"object"},"type":"array","title":"Triggers"},"interval":{"type":"integer","title":"Interval"},"last_execution_time":{"type":"string","format":"date-time","title":"Last Execution Time"},"last_execution_status":{"type":"string","title":"Last Execution Status"},"providers":{"items":{"$ref":"#/components/schemas/ProviderDTO"},"type":"array","title":"Providers"},"workflow_raw":{"type":"string","title":"Workflow Raw"},"revision":{"type":"integer","title":"Revision","default":1},"last_updated":{"type":"string","format":"date-time","title":"Last Updated"},"invalid":{"type":"boolean","title":"Invalid","default":false}},"type":"object","required":["id","created_by","creation_time","interval","providers","workflow_raw"],"title":"WorkflowDTO"},"WorkflowExecutionDTO":{"properties":{"id":{"type":"string","title":"Id"},"workflow_id":{"type":"string","title":"Workflow Id"},"started":{"type":"string","format":"date-time","title":"Started"},"triggered_by":{"type":"string","title":"Triggered By"},"status":{"type":"string","title":"Status"},"logs":{"items":{"$ref":"#/components/schemas/WorkflowExecutionLogsDTO"},"type":"array","title":"Logs"},"error":{"type":"string","title":"Error"},"execution_time":{"type":"integer","title":"Execution 
Time"},"results":{"type":"object","title":"Results"}},"type":"object","required":["id","workflow_id","started","triggered_by","status"],"title":"WorkflowExecutionDTO"},"WorkflowExecutionLogsDTO":{"properties":{"id":{"type":"integer","title":"Id"},"timestamp":{"type":"string","format":"date-time","title":"Timestamp"},"message":{"type":"string","title":"Message"},"context":{"type":"object","title":"Context"}},"type":"object","required":["id","timestamp","message"],"title":"WorkflowExecutionLogsDTO"},"WorkflowToAlertExecutionDTO":{"properties":{"workflow_id":{"type":"string","title":"Workflow Id"},"workflow_execution_id":{"type":"string","title":"Workflow Execution Id"},"alert_fingerprint":{"type":"string","title":"Alert Fingerprint"},"workflow_status":{"type":"string","title":"Workflow Status"},"workflow_started":{"type":"string","format":"date-time","title":"Workflow Started"}},"type":"object","required":["workflow_id","workflow_execution_id","alert_fingerprint","workflow_status","workflow_started"],"title":"WorkflowToAlertExecutionDTO"},"keep__api__routes__settings__CreateUserRequest":{"properties":{"username":{"type":"string","title":"Username"},"password":{"type":"string","title":"Password"},"role":{"type":"string","title":"Role"}},"type":"object","required":["username","role"],"title":"CreateUserRequest"},"keep__api__routes__users__CreateUserRequest":{"properties":{"username":{"type":"string","title":"Username"},"password":{"type":"string","title":"Password"},"role":{"type":"string","title":"Role"}},"type":"object","required":["username","role"],"title":"CreateUserRequest"}},"securitySchemes":{"API Key":{"type":"apiKey","in":"header","name":"X-API-KEY"},"HTTPBasic":{"type":"http","scheme":"basic"},"OAuth2PasswordBearer":{"type":"oauth2","flows":{"password":{"scopes":{},"tokenUrl":"token"}}}}}} \ No newline at end of file +{"openapi": "3.0.2", "info": {"title": "Keep API", "description": "Rest API powering https://platform.keephq.dev and friends 
\ud83c\udfc4\u200d\u2640\ufe0f", "version": "0.24.5"}, "paths": {"/": {"get": {"summary": "Root", "description": "App desctiption and version.", "operationId": "root__get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}}}, "/providers": {"get": {"tags": ["providers"], "summary": "Get Providers", "operationId": "get_providers_providers_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/export": {"get": {"tags": ["providers"], "summary": "Get Installed Providers", "description": "export all installed providers", "operationId": "get_installed_providers_providers_export_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_type}/{provider_id}/configured-alerts": {"get": {"tags": ["providers"], "summary": "Get Alerts Configuration", "description": "Get alerts configuration from a provider", "operationId": "get_alerts_configuration_providers__provider_type___provider_id__configured_alerts_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {}, "type": "array", "title": "Response Get Alerts Configuration Providers Provider Type Provider Id Configured Alerts Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": 
[]}]}}, "/providers/{provider_type}/{provider_id}/logs": {"get": {"tags": ["providers"], "summary": "Get Logs", "description": "Get logs from a provider", "operationId": "get_logs_providers__provider_type___provider_id__logs_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}, {"required": false, "schema": {"type": "integer", "title": "Limit", "default": 5}, "name": "limit", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {}, "type": "array", "title": "Response Get Logs Providers Provider Type Provider Id Logs Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_type}/schema": {"get": {"tags": ["providers"], "summary": "Get Alerts Schema", "description": "Get the provider's API schema used to push alerts configuration", "operationId": "get_alerts_schema_providers__provider_type__schema_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Get Alerts Schema Providers Provider Type Schema Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/providers/{provider_type}/{provider_id}/alerts/count": {"get": {"tags": ["providers"], "summary": "Get Alert Count", "description": "Get number of alerts a specific provider has received (in a specific time time period or ever)", 
"operationId": "get_alert_count_providers__provider_type___provider_id__alerts_count_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}, {"required": true, "schema": {"type": "boolean", "title": "Ever"}, "name": "ever", "in": "query"}, {"required": false, "schema": {"type": "string", "format": "date-time", "title": "Start Time"}, "name": "start_time", "in": "query"}, {"required": false, "schema": {"type": "string", "format": "date-time", "title": "End Time"}, "name": "end_time", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_type}/{provider_id}/alerts": {"post": {"tags": ["providers"], "summary": "Add Alert", "description": "Push new alerts to the provider", "operationId": "add_alert_providers__provider_type___provider_id__alerts_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}, {"required": false, "schema": {"type": "string", "title": "Alert Id"}, "name": "alert_id", "in": "query"}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "title": "Alert"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, 
"security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/test": {"post": {"tags": ["providers"], "summary": "Test Provider", "description": "Test a provider's alert retrieval", "operationId": "test_provider_providers_test_post", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "title": "Provider Info"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_type}/{provider_id}": {"delete": {"tags": ["providers"], "summary": "Delete Provider", "operationId": "delete_provider_providers__provider_type___provider_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_id}/scopes": {"post": {"tags": ["providers"], "summary": "Validate Provider Scopes", "description": "Validate provider scopes", "operationId": "validate_provider_scopes_providers__provider_id__scopes_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"anyOf": [{"type": 
"boolean"}, {"type": "string"}]}, "type": "object", "title": "Response Validate Provider Scopes Providers Provider Id Scopes Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_id}": {"put": {"tags": ["providers"], "summary": "Update Provider", "description": "Update provider", "operationId": "update_provider_providers__provider_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/install": {"post": {"tags": ["providers"], "summary": "Install Provider", "operationId": "install_provider_providers_install_post", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/install/oauth2/{provider_type}": {"post": {"tags": ["providers"], "summary": "Install Provider Oauth2", "operationId": "install_provider_oauth2_providers_install_oauth2__provider_type__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "title": "Provider Info"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": 
{"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_id}/invoke/{method}": {"post": {"tags": ["providers"], "summary": "Invoke Provider Method", "description": "Invoke provider special method", "operationId": "invoke_provider_method_providers__provider_id__invoke__method__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Method"}, "name": "method", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "title": "Method Params"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/install/webhook/{provider_type}/{provider_id}": {"post": {"tags": ["providers"], "summary": "Install Provider Webhook", "operationId": "install_provider_webhook_providers_install_webhook__provider_type___provider_id__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_type}/webhook": {"get": {"tags": ["providers"], "summary": "Get Webhook 
Settings", "operationId": "get_webhook_settings_providers__provider_type__webhook_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/ProviderWebhookSettings"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/actions": {"get": {"tags": ["actions"], "summary": "Get Actions", "description": "Get all actions", "operationId": "get_actions_actions_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["actions"], "summary": "Create Actions", "description": "Create new actions by uploading a file", "operationId": "create_actions_actions_post", "requestBody": {"content": {"multipart/form-data": {"schema": {"$ref": "#/components/schemas/Body_create_actions_actions_post"}}}}, "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/actions/{action_id}": {"put": {"tags": ["actions"], "summary": "Put Action", "description": "Update an action", "operationId": "put_action_actions__action_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Action Id"}, "name": "action_id", "in": "path"}], "requestBody": {"content": {"multipart/form-data": {"schema": {"$ref": 
"#/components/schemas/Body_put_action_actions__action_id__put"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["actions"], "summary": "Delete Action", "description": "Delete an action", "operationId": "delete_action_actions__action_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Action Id"}, "name": "action_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/healthcheck": {"get": {"tags": ["healthcheck"], "summary": "Healthcheck", "description": "simple healthcheck endpoint", "operationId": "healthcheck_healthcheck_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Healthcheck Healthcheck Get"}}}}}}}, "/alerts": {"get": {"tags": ["alerts"], "summary": "Get All Alerts", "description": "Get last alerts occurrence", "operationId": "get_all_alerts_alerts_get", "parameters": [{"required": false, "schema": {"type": "integer", "title": "Limit", "default": 1000}, "name": "limit", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array", "title": "Response Get All Alerts Alerts Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": 
"#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["alerts"], "summary": "Delete Alert", "description": "Delete alert by fingerprint and last received time", "operationId": "delete_alert_alerts_delete", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/DeleteRequestBody"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Delete Alert Alerts Delete"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/{fingerprint}/history": {"get": {"tags": ["alerts"], "summary": "Get Alert History", "description": "Get alert history", "operationId": "get_alert_history_alerts__fingerprint__history_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array", "title": "Response Get Alert History Alerts Fingerprint History Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/{fingerprint}/assign/{last_received}": {"post": {"tags": ["alerts"], "summary": "Assign Alert", "description": "Assign alert to user", "operationId": "assign_alert_alerts__fingerprint__assign__last_received__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Fingerprint"}, "name": 
"fingerprint", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Last Received"}, "name": "last_received", "in": "path"}, {"required": false, "schema": {"type": "boolean", "title": "Unassign", "default": false}, "name": "unassign", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Assign Alert Alerts Fingerprint Assign Last Received Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/event": {"post": {"tags": ["alerts"], "summary": "Receive Generic Event", "description": "Receive a generic alert event", "operationId": "receive_generic_event_alerts_event_post", "parameters": [{"required": false, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "query"}], "requestBody": {"content": {"application/json": {"schema": {"anyOf": [{"$ref": "#/components/schemas/AlertDto"}, {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array"}, {"type": "object"}], "title": "Event"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"anyOf": [{"$ref": "#/components/schemas/AlertDto"}, {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array"}], "title": "Response Receive Generic Event Alerts Event Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/event/netdata": {"get": {"tags": ["alerts"], "summary": "Webhook Challenge", "description": "Helper function to complete Netdata webhook challenge", 
"operationId": "webhook_challenge_alerts_event_netdata_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}}}, "/alerts/event/{provider_type}": {"post": {"tags": ["alerts"], "summary": "Receive Event", "description": "Receive an alert event from a provider", "operationId": "receive_event_alerts_event__provider_type__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": false, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "query"}, {"required": false, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "query"}], "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Receive Event Alerts Event Provider Type Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/{fingerprint}": {"get": {"tags": ["alerts"], "summary": "Get Alert", "description": "Get alert by fingerprint", "operationId": "get_alert_alerts__fingerprint__get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/AlertDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/enrich": {"post": {"tags": ["alerts"], "summary": "Enrich Alert", "description": "Enrich an alert", 
"operationId": "enrich_alert_alerts_enrich_post", "parameters": [{"description": "Dispose on new alert", "required": false, "schema": {"type": "boolean", "title": "Dispose On New Alert", "description": "Dispose on new alert", "default": false}, "name": "dispose_on_new_alert", "in": "query"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/EnrichAlertRequestBody"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Enrich Alert Alerts Enrich Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/unenrich": {"post": {"tags": ["alerts"], "summary": "Unenrich Alert", "description": "Un-Enrich an alert", "operationId": "unenrich_alert_alerts_unenrich_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/UnEnrichAlertRequestBody"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Unenrich Alert Alerts Unenrich Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/search": {"post": {"tags": ["alerts"], "summary": "Search Alerts", "description": "Search alerts", "operationId": "search_alerts_alerts_search_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/SearchAlertsRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", 
"content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array", "title": "Response Search Alerts Alerts Search Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/audit": {"post": {"tags": ["alerts"], "summary": "Get Multiple Fingerprint Alert Audit", "description": "Get alert timeline audit trail for multiple fingerprints", "operationId": "get_multiple_fingerprint_alert_audit_alerts_audit_post", "requestBody": {"content": {"application/json": {"schema": {"items": {"type": "string"}, "type": "array", "title": "Fingerprints"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertAuditDto"}, "type": "array", "title": "Response Get Multiple Fingerprint Alert Audit Alerts Audit Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/{fingerprint}/audit": {"get": {"tags": ["alerts"], "summary": "Get Alert Audit", "description": "Get alert timeline audit trail", "operationId": "get_alert_audit_alerts__fingerprint__audit_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertAuditDto"}, "type": "array", "title": "Response Get Alert Audit Alerts Fingerprint Audit Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": 
"#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/quality/metrics": {"get": {"tags": ["alerts"], "summary": "Get Alert Quality", "description": "Get alert quality", "operationId": "get_alert_quality_alerts_quality_metrics_get", "parameters": [{"required": false, "schema": {"items": {"type": "string"}, "type": "array", "title": "Fields", "default": []}, "name": "fields", "in": "query"}, {"required": false, "schema": {"type": "string", "title": "Time Stamp"}, "name": "time_stamp", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents": {"get": {"tags": ["incidents"], "summary": "Get All Incidents", "description": "Get last incidents", "operationId": "get_all_incidents_incidents_get", "parameters": [{"required": false, "schema": {"type": "boolean", "title": "Confirmed", "default": true}, "name": "confirmed", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Limit", "default": 25}, "name": "limit", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Offset", "default": 0}, "name": "offset", "in": "query"}, {"required": false, "schema": {"allOf": [{"$ref": "#/components/schemas/IncidentSorting"}], "default": "creation_time"}, "name": "sorting", "in": "query"}, {"required": false, "schema": {"items": {"$ref": "#/components/schemas/IncidentStatus"}, "type": "array"}, "name": "status", "in": "query"}, {"required": false, "schema": {"items": {"$ref": "#/components/schemas/IncidentSeverity"}, "type": "array"}, "name": "severity", "in": "query"}, {"required": false, "schema": {"items": {"type": "string"}, "type": "array", "title": 
"Assignees"}, "name": "assignees", "in": "query"}, {"required": false, "schema": {"items": {"type": "string"}, "type": "array", "title": "Sources"}, "name": "sources", "in": "query"}, {"required": false, "schema": {"items": {"type": "string"}, "type": "array", "title": "Affected Services"}, "name": "affected_services", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentsPaginatedResultsDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["incidents"], "summary": "Create Incident", "description": "Create new incident", "operationId": "create_incident_incidents_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDtoIn"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/meta": {"get": {"tags": ["incidents"], "summary": "Get Incidents Meta", "description": "Get incidents' metadata for filtering", "operationId": "get_incidents_meta_incidents_meta_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentListFilterParamsDto"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}": {"get": {"tags": ["incidents"], "summary": "Get Incident", "description": "Get incident by id", "operationId": 
"get_incident_incidents__incident_id__get", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "put": {"tags": ["incidents"], "summary": "Update Incident", "description": "Update incident by id", "operationId": "update_incident_incidents__incident_id__put", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Incident Id"}, "name": "incident_id", "in": "path"}, {"description": "Whether the incident update request was generated by AI", "required": false, "schema": {"type": "boolean", "title": "Generatedbyai", "description": "Whether the incident update request was generated by AI", "default": false}, "name": "generatedByAi", "in": "query"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDtoIn"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["incidents"], "summary": "Delete Incident", "description": "Delete incident by incident id", "operationId": "delete_incident_incidents__incident_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "responses": 
{"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/merge": {"post": {"tags": ["incidents"], "summary": "Merge Incidents", "description": "Merge incidents", "operationId": "merge_incidents_incidents_merge_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/MergeIncidentsRequestDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/MergeIncidentsResponseDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}/alerts": {"get": {"tags": ["incidents"], "summary": "Get Incident Alerts", "description": "Get incident alerts by incident id", "operationId": "get_incident_alerts_incidents__incident_id__alerts_get", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Incident Id"}, "name": "incident_id", "in": "path"}, {"required": false, "schema": {"type": "integer", "title": "Limit", "default": 25}, "name": "limit", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Offset", "default": 0}, "name": "offset", "in": "query"}, {"required": false, "schema": {"type": "boolean", "title": "Include Unlinked", "default": false}, "name": "include_unlinked", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/AlertWithIncidentLinkMetadataPaginatedResultsDto"}}}}, "422": {"description": 
"Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["incidents"], "summary": "Add Alerts To Incident", "description": "Add alerts to incident", "operationId": "add_alerts_to_incident_incidents__incident_id__alerts_post", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Incident Id"}, "name": "incident_id", "in": "path"}, {"required": false, "schema": {"type": "boolean", "title": "Is Created By Ai", "default": false}, "name": "is_created_by_ai", "in": "query"}], "requestBody": {"content": {"application/json": {"schema": {"items": {"type": "string", "format": "uuid"}, "type": "array", "title": "Alert Ids"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array", "title": "Response Add Alerts To Incident Incidents Incident Id Alerts Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["incidents"], "summary": "Delete Alerts From Incident", "description": "Delete alerts from incident", "operationId": "delete_alerts_from_incident_incidents__incident_id__alerts_delete", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"items": {"type": "string", "format": "uuid"}, "type": "array", "title": "Alert Ids"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertDto"}, 
"type": "array", "title": "Response Delete Alerts From Incident Incidents Incident Id Alerts Delete"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}/future_incidents": {"get": {"tags": ["incidents"], "summary": "Get Future Incidents For An Incident", "description": "Get same incidents linked to this one", "operationId": "get_future_incidents_for_an_incident_incidents__incident_id__future_incidents_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Incident Id"}, "name": "incident_id", "in": "path"}, {"required": false, "schema": {"type": "integer", "title": "Limit", "default": 25}, "name": "limit", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Offset", "default": 0}, "name": "offset", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentsPaginatedResultsDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}/workflows": {"get": {"tags": ["incidents"], "summary": "Get Incident Workflows", "description": "Get incident workflows by incident id", "operationId": "get_incident_workflows_incidents__incident_id__workflows_get", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Incident Id"}, "name": "incident_id", "in": "path"}, {"required": false, "schema": {"type": "integer", "title": "Limit", "default": 25}, "name": "limit", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Offset", "default": 0}, "name": "offset", "in": "query"}], 
"responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowExecutionsPaginatedResultsDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/event/{provider_type}": {"post": {"tags": ["incidents"], "summary": "Receive Event", "description": "Receive an alert event from a provider", "operationId": "receive_event_incidents_event__provider_type__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": false, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "query"}], "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Receive Event Incidents Event Provider Type Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}/status": {"post": {"tags": ["incidents"], "summary": "Change Incident Status", "description": "Change incident status", "operationId": "change_incident_status_incidents__incident_id__status_post", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentStatusChangeDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDto"}}}}, 
"422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}/comment": {"post": {"tags": ["incidents"], "summary": "Add Comment", "description": "Add incident audit activity", "operationId": "add_comment_incidents__incident_id__comment_post", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentStatusChangeDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/AlertAudit"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/ai/suggest": {"post": {"tags": ["incidents"], "summary": "Create With Ai", "description": "Create incident with AI", "operationId": "create_with_ai_incidents_ai_suggest_post", "requestBody": {"content": {"application/json": {"schema": {"items": {"type": "string"}, "type": "array", "title": "Alerts Fingerprints"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentsClusteringSuggestion"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/ai/{suggestion_id}/commit": {"post": {"tags": ["incidents"], "summary": "Commit With Ai", "description": "Commit incidents with AI and user 
feedback", "operationId": "commit_with_ai_incidents_ai__suggestion_id__commit_post", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Suggestion Id"}, "name": "suggestion_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/IncidentCommit"}, "type": "array", "title": "Incidents With Feedback"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/IncidentDto"}, "type": "array", "title": "Response Commit With Ai Incidents Ai Suggestion Id Commit Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}/confirm": {"post": {"tags": ["incidents"], "summary": "Confirm Incident", "description": "Confirm predicted incident by id", "operationId": "confirm_incident_incidents__incident_id__confirm_post", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/webhook": {"get": {"tags": ["settings"], "summary": "Webhook Settings", "description": "Get details about the webhook endpoint (e.g. 
the API url and an API key)", "operationId": "webhook_settings_settings_webhook_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WebhookSettings"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/smtp": {"get": {"tags": ["settings"], "summary": "Get Smtp Settings", "description": "Get SMTP settings", "operationId": "get_smtp_settings_settings_smtp_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["settings"], "summary": "Update Smtp Settings", "description": "Install or update SMTP settings", "operationId": "update_smtp_settings_settings_smtp_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/SMTPSettings"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["settings"], "summary": "Delete Smtp Settings", "description": "Delete SMTP settings", "operationId": "delete_smtp_settings_settings_smtp_delete", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/smtp/test": {"post": {"tags": ["settings"], "summary": "Test Smtp Settings", "description": "Test SMTP settings", "operationId": "test_smtp_settings_settings_smtp_test_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/SMTPSettings"}}}, "required": true}, 
"responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/apikey": {"put": {"tags": ["settings"], "summary": "Update Api Key", "description": "Update API key secret", "operationId": "update_api_key_settings_apikey_put", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["settings"], "summary": "Create Key", "description": "Create API key", "operationId": "create_key_settings_apikey_post", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/apikeys": {"get": {"tags": ["settings"], "summary": "Get Keys", "description": "Get API keys", "operationId": "get_keys_settings_apikeys_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/apikey/{keyId}": {"delete": {"tags": ["settings"], "summary": "Delete Api Key", "description": "Delete API key", "operationId": "delete_api_key_settings_apikey__keyId__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Keyid"}, "name": "keyId", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, 
{"OAuth2PasswordBearer": []}]}}, "/settings/sso": {"get": {"tags": ["settings"], "summary": "Get Sso Settings", "operationId": "get_sso_settings_settings_sso_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Workflows", "description": "Get workflows", "operationId": "get_workflows_workflows_get", "parameters": [{"required": false, "schema": {"type": "boolean", "title": "Is V2", "default": false}, "name": "is_v2", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"anyOf": [{"items": {"$ref": "#/components/schemas/WorkflowDTO"}, "type": "array"}, {"items": {"type": "object"}, "type": "array"}], "title": "Response Get Workflows Workflows Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["workflows", "alerts"], "summary": "Create Workflow", "description": "Create or update a workflow", "operationId": "create_workflow_workflows_post", "requestBody": {"content": {"multipart/form-data": {"schema": {"$ref": "#/components/schemas/Body_create_workflow_workflows_post"}}}, "required": true}, "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowCreateOrUpdateDTO"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/export": {"get": {"tags": ["workflows", "alerts"], "summary": "Export Workflows", "description": "export 
all workflow Yamls", "operationId": "export_workflows_workflows_export_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"type": "string"}, "type": "array", "title": "Response Export Workflows Workflows Export Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/{workflow_id}/run": {"post": {"tags": ["workflows", "alerts"], "summary": "Run Workflow", "description": "Run a workflow", "operationId": "run_workflow_workflows__workflow_id__run_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, "name": "workflow_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "title": "Body"}}}}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Run Workflow Workflows Workflow Id Run Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/test": {"post": {"tags": ["workflows", "alerts"], "summary": "Run Workflow From Definition", "description": "Test run a workflow from a definition", "operationId": "run_workflow_from_definition_workflows_test_post", "requestBody": {"content": {"multipart/form-data": {"schema": {"$ref": "#/components/schemas/Body_run_workflow_from_definition_workflows_test_post"}}}}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Run Workflow From Definition Workflows Test Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, 
{"OAuth2PasswordBearer": []}]}}, "/workflows/json": {"post": {"tags": ["workflows", "alerts"], "summary": "Create Workflow From Body", "description": "Create or update a workflow", "operationId": "create_workflow_from_body_workflows_json_post", "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowCreateOrUpdateDTO"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/random-templates": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Random Workflow Templates", "description": "Get random workflow templates", "operationId": "get_random_workflow_templates_workflows_random_templates_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"type": "object"}, "type": "array", "title": "Response Get Random Workflow Templates Workflows Random Templates Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/{workflow_id}": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Workflow By Id", "description": "Get workflow by ID", "operationId": "get_workflow_by_id_workflows__workflow_id__get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, "name": "workflow_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "put": {"tags": ["workflows", "alerts"], "summary": "Update Workflow By Id", "description": "Update a workflow", "operationId": "update_workflow_by_id_workflows__workflow_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, 
"name": "workflow_id", "in": "path"}], "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowCreateOrUpdateDTO"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["workflows", "alerts"], "summary": "Delete Workflow By Id", "description": "Delete workflow", "operationId": "delete_workflow_by_id_workflows__workflow_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, "name": "workflow_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/{workflow_id}/raw": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Raw Workflow By Id", "description": "Get raw workflow by ID", "operationId": "get_raw_workflow_by_id_workflows__workflow_id__raw_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, "name": "workflow_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "string", "title": "Response Get Raw Workflow By Id Workflows Workflow Id Raw Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/executions": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Workflow Executions By Alert Fingerprint", 
"description": "Get workflow executions by alert fingerprint", "operationId": "get_workflow_executions_by_alert_fingerprint_workflows_executions_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/WorkflowToAlertExecutionDTO"}, "type": "array", "title": "Response Get Workflow Executions By Alert Fingerprint Workflows Executions Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/{workflow_id}/runs": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Workflow By Id", "description": "Get workflow executions by ID", "operationId": "get_workflow_by_id_workflows__workflow_id__runs_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, "name": "workflow_id", "in": "path"}, {"required": false, "schema": {"type": "integer", "title": "Tab", "default": 1}, "name": "tab", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Limit", "default": 25}, "name": "limit", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Offset", "default": 0}, "name": "offset", "in": "query"}, {"required": false, "schema": {"items": {"type": "string"}, "type": "array", "title": "Status"}, "name": "status", "in": "query"}, {"required": false, "schema": {"items": {"type": "string"}, "type": "array", "title": "Trigger"}, "name": "trigger", "in": "query"}, {"required": false, "schema": {"type": "string", "title": "Execution Id"}, "name": "execution_id", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowExecutionsPaginatedResultsDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": 
[]}]}}, "/workflows/{workflow_id}/runs/{workflow_execution_id}": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Workflow Execution Status", "description": "Get a workflow execution status", "operationId": "get_workflow_execution_status_workflows__workflow_id__runs__workflow_execution_id__get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Execution Id"}, "name": "workflow_execution_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowExecutionDTO"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/whoami": {"get": {"tags": ["whoami"], "summary": "Get Tenant Id", "description": "Get tenant id", "operationId": "get_tenant_id_whoami_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Get Tenant Id Whoami Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/pusher/auth": {"post": {"tags": ["pusher"], "summary": "Pusher Authentication", "description": "Authenticate a user to a private channel\n\nArgs:\n request (Request): The request object\n tenant_id (str, optional): The tenant ID. Defaults to Depends(verify_bearer_token).\n pusher_client (Pusher, optional): Pusher client. 
Defaults to Depends(get_pusher_client).\n\nRaises:\n HTTPException: 403 if the user is not allowed to access the channel.\n\nReturns:\n dict: The authentication response.", "operationId": "pusher_authentication_pusher_auth_post", "requestBody": {"content": {"application/x-www-form-urlencoded": {"schema": {"$ref": "#/components/schemas/Body_pusher_authentication_pusher_auth_post"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Pusher Authentication Pusher Auth Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/status": {"get": {"tags": ["status"], "summary": "Status", "description": "simple status endpoint", "operationId": "status_status_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Status Status Get"}}}}}}}, "/rules": {"get": {"tags": ["rules"], "summary": "Get Rules", "description": "Get Rules", "operationId": "get_rules_rules_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["rules"], "summary": "Create Rule", "description": "Create Rule", "operationId": "create_rule_rules_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/RuleCreateDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, 
{"OAuth2PasswordBearer": []}]}}, "/rules/{rule_id}": {"put": {"tags": ["rules"], "summary": "Update Rule", "description": "Update Rule", "operationId": "update_rule_rules__rule_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["rules"], "summary": "Delete Rule", "description": "Delete Rule", "operationId": "delete_rule_rules__rule_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/preset": {"get": {"tags": ["preset"], "summary": "Get Presets", "description": "Get all presets for tenant", "operationId": "get_presets_preset_get", "parameters": [{"required": false, "schema": {"type": "string", "title": "Time Stamp"}, "name": "time_stamp", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/PresetDto"}, "type": "array", "title": "Response Get Presets Preset Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["preset"], 
"summary": "Create Preset", "description": "Create a preset for tenant", "operationId": "create_preset_preset_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreateOrUpdatePresetDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/PresetDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/preset/{uuid}": {"put": {"tags": ["preset"], "summary": "Update Preset", "description": "Update a preset for tenant", "operationId": "update_preset_preset__uuid__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Uuid"}, "name": "uuid", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreateOrUpdatePresetDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/PresetDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["preset"], "summary": "Delete Preset", "description": "Delete a preset for tenant", "operationId": "delete_preset_preset__uuid__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Uuid"}, "name": "uuid", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, 
{"OAuth2PasswordBearer": []}]}}, "/preset/{preset_name}/alerts": {"get": {"tags": ["preset"], "summary": "Get Preset Alerts", "description": "Get the alerts of a preset", "operationId": "get_preset_alerts_preset__preset_name__alerts_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Preset Name"}, "name": "preset_name", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {}, "type": "array", "title": "Response Get Preset Alerts Preset Preset Name Alerts Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/preset/{preset_id}/tab": {"post": {"tags": ["preset"], "summary": "Create Preset Tab", "description": "Create a tab for a preset", "operationId": "create_preset_tab_preset__preset_id__tab_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Preset Id"}, "name": "preset_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreatePresetTab"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/preset/{preset_id}/tab/{tab_id}": {"delete": {"tags": ["preset"], "summary": "Delete Tab", "description": "Delete a tab from a preset", "operationId": "delete_tab_preset__preset_id__tab__tab_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Preset Id"}, "name": "preset_id", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Tab Id"}, "name": "tab_id", 
"in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/mapping": {"get": {"tags": ["enrichment", "mapping"], "summary": "Get Rules", "description": "Get all mapping rules", "operationId": "get_rules_mapping_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/MappingRuleDtoOut"}, "type": "array", "title": "Response Get Rules Mapping Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["enrichment", "mapping"], "summary": "Create Rule", "description": "Create a new mapping rule", "operationId": "create_rule_mapping_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/MappingRuleDtoIn"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/MappingRule"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/mapping/{rule_id}": {"put": {"tags": ["enrichment", "mapping"], "summary": "Update Rule", "description": "Update an existing rule", "operationId": "update_rule_mapping__rule_id__put", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/MappingRuleDtoIn"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": 
{"application/json": {"schema": {"$ref": "#/components/schemas/MappingRuleDtoOut"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["enrichment", "mapping"], "summary": "Delete Rule", "description": "Delete a mapping rule", "operationId": "delete_rule_mapping__rule_id__delete", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/groups": {"get": {"tags": ["auth", "groups"], "summary": "Get Groups", "description": "Get all groups", "operationId": "get_groups_auth_groups_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/Group"}, "type": "array", "title": "Response Get Groups Auth Groups Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["auth", "groups"], "summary": "Create Group", "description": "Create a group", "operationId": "create_group_auth_groups_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreateOrUpdateGroupRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, 
{"OAuth2PasswordBearer": []}]}}, "/auth/groups/{group_name}": {"put": {"tags": ["auth", "groups"], "summary": "Update Group", "description": "Update a group", "operationId": "update_group_auth_groups__group_name__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Group Name"}, "name": "group_name", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreateOrUpdateGroupRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["auth", "groups"], "summary": "Delete Group", "description": "Delete a group", "operationId": "delete_group_auth_groups__group_name__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Group Name"}, "name": "group_name", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/permissions": {"get": {"tags": ["auth", "permissions"], "summary": "Get Permissions", "description": "Get resources permissions", "operationId": "get_permissions_auth_permissions_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/ResourcePermission"}, "type": "array", "title": "Response Get Permissions Auth Permissions Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["auth", 
"permissions"], "summary": "Create Permissions", "description": "Create permissions for resources", "operationId": "create_permissions_auth_permissions_post", "requestBody": {"content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/ResourcePermission"}, "type": "array", "title": "Resource Permissions", "description": "List of resource permissions"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/permissions/scopes": {"get": {"tags": ["auth", "permissions"], "summary": "Get Scopes", "description": "Get all resources types", "operationId": "get_scopes_auth_permissions_scopes_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"type": "string"}, "type": "array", "title": "Response Get Scopes Auth Permissions Scopes Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/roles": {"get": {"tags": ["auth", "roles"], "summary": "Get Roles", "description": "Get roles", "operationId": "get_roles_auth_roles_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/Role"}, "type": "array", "title": "Response Get Roles Auth Roles Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["auth", "roles"], "summary": "Create Role", "description": "Create role", "operationId": "create_role_auth_roles_post", "requestBody": {"content": {"application/json": {"schema": {"allOf": [{"$ref": "#/components/schemas/CreateOrUpdateRole"}], "title": "Role", "description": "Role"}}}, "required": true}, 
"responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/roles/{role_id}": {"put": {"tags": ["auth", "roles"], "summary": "Update Role", "description": "Update role", "operationId": "update_role_auth_roles__role_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Role Id"}, "name": "role_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"allOf": [{"$ref": "#/components/schemas/CreateOrUpdateRole"}], "title": "Role", "description": "Role"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["auth", "roles"], "summary": "Delete Role", "description": "Delete role", "operationId": "delete_role_auth_roles__role_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Role Id"}, "name": "role_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/users": {"get": {"tags": ["auth", "users"], "summary": "Get Users", "description": "Get all users", "operationId": "get_users_auth_users_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": 
{"schema": {"items": {"$ref": "#/components/schemas/User"}, "type": "array", "title": "Response Get Users Auth Users Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["auth", "users"], "summary": "Create User", "description": "Create a user", "operationId": "create_user_auth_users_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreateUserRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/users/{user_email}": {"put": {"tags": ["auth", "users"], "summary": "Update User", "description": "Update a user", "operationId": "update_user_auth_users__user_email__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "User Email"}, "name": "user_email", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/UpdateUserRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["auth", "users"], "summary": "Delete User", "description": "Delete a user", "operationId": "delete_user_auth_users__user_email__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "User Email"}, "name": "user_email", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": 
{"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/metrics": {"get": {"tags": ["metrics"], "summary": "Get Metrics", "description": "This endpoint is used by Prometheus to scrape such metrics from the application:\n- alerts_total {incident_name, incident_id} - The total number of alerts per incident.\n- open_incidents_total - The total number of open incidents.\n- workflows_executions_total {status} - The total number of workflow executions.\n\nPlease note that those metrics are per-tenant and are not designed to be used for the monitoring of the application itself.\n\nExample prometheus configuration:\n```\nscrape_configs:\n- job_name: \"scrape_keep\"\n scrape_interval: 5m # It's important to scrape not too often to avoid rate limiting.\n static_configs:\n - targets: [\"https://api.keephq.dev\"] # Or your own domain.\n authorization:\n type: Bearer\n credentials: \"{Your API Key}\"\n\n # Optional, you can add labels to exported incidents. \n # Label values will be equal to the last incident's alert payload value matching the label.\n # Attention! 
Don't add \"flaky\" labels which could change from alert to alert within the same incident.\n # Good labels: ['labels.department', 'labels.team'], bad labels: ['labels.severity', 'labels.pod_id']\n # Check Keep -> Feed -> \"extraPayload\" column, it will help in writing labels.\n\n params:\n labels: ['labels.service', 'labels.queue']\n # Will result as: \"labels_service\" and \"labels_queue\".\n```", "operationId": "get_metrics_metrics_get", "parameters": [{"required": false, "schema": {"items": {"type": "string"}, "type": "array", "title": "Labels"}, "name": "labels", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/extraction": {"get": {"tags": ["enrichment", "extraction"], "summary": "Get Extraction Rules", "description": "Get all extraction rules", "operationId": "get_extraction_rules_extraction_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/ExtractionRuleDtoOut"}, "type": "array", "title": "Response Get Extraction Rules Extraction Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["enrichment", "extraction"], "summary": "Create Extraction Rule", "description": "Create a new extraction rule", "operationId": "create_extraction_rule_extraction_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/ExtractionRuleDtoBase"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/ExtractionRuleDtoOut"}}}}, "422": {"description": "Validation Error", "content": 
{"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/extraction/{rule_id}": {"put": {"tags": ["enrichment", "extraction"], "summary": "Update Extraction Rule", "description": "Update an existing extraction rule", "operationId": "update_extraction_rule_extraction__rule_id__put", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/ExtractionRuleDtoBase"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/ExtractionRuleDtoOut"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["enrichment", "extraction"], "summary": "Delete Extraction Rule", "description": "Delete an extraction rule", "operationId": "delete_extraction_rule_extraction__rule_id__delete", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/dashboard": {"get": {"tags": ["dashboard"], "summary": "Read Dashboards", "operationId": "read_dashboards_dashboard_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/DashboardResponseDTO"}, "type": 
"array", "title": "Response Read Dashboards Dashboard Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["dashboard"], "summary": "Create Dashboard", "operationId": "create_dashboard_dashboard_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/DashboardCreateDTO"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/DashboardResponseDTO"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/dashboard/{dashboard_id}": {"put": {"tags": ["dashboard"], "summary": "Update Dashboard", "operationId": "update_dashboard_dashboard__dashboard_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Dashboard Id"}, "name": "dashboard_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/DashboardUpdateDTO"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/DashboardResponseDTO"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["dashboard"], "summary": "Delete Dashboard", "operationId": "delete_dashboard_dashboard__dashboard_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Dashboard Id"}, "name": "dashboard_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", 
"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/dashboard/metric-widgets": {"get": {"tags": ["dashboard"], "summary": "Get Metric Widgets", "operationId": "get_metric_widgets_dashboard_metric_widgets_get", "parameters": [{"required": false, "schema": {"type": "boolean", "title": "Mttr", "default": true}, "name": "mttr", "in": "query"}, {"required": false, "schema": {"type": "boolean", "title": "Apd", "default": true}, "name": "apd", "in": "query"}, {"required": false, "schema": {"type": "boolean", "title": "Ipd", "default": true}, "name": "ipd", "in": "query"}, {"required": false, "schema": {"type": "boolean", "title": "Wpd", "default": true}, "name": "wpd", "in": "query"}, {"required": false, "schema": {"type": "string", "title": "Time Stamp"}, "name": "time_stamp", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/tags": {"get": {"tags": ["tags"], "summary": "Get Tags", "description": "get tags", "operationId": "get_tags_tags_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"type": "object"}, "type": "array", "title": "Response Get Tags Tags Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/maintenance": {"get": {"tags": ["maintenance"], "summary": "Get Maintenance Rules", "description": "Get all maintenance rules", "operationId": "get_maintenance_rules_maintenance_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": 
"#/components/schemas/MaintenanceRuleRead"}, "type": "array", "title": "Response Get Maintenance Rules Maintenance Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["maintenance"], "summary": "Create Maintenance Rule", "description": "Create a new maintenance rule", "operationId": "create_maintenance_rule_maintenance_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/MaintenanceRuleCreate"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/MaintenanceRuleRead"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/maintenance/{rule_id}": {"put": {"tags": ["maintenance"], "summary": "Update Maintenance Rule", "description": "Update an existing maintenance rule", "operationId": "update_maintenance_rule_maintenance__rule_id__put", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/MaintenanceRuleCreate"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/MaintenanceRuleRead"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["maintenance"], "summary": "Delete Maintenance Rule", "description": "Delete a maintenance rule", "operationId": "delete_maintenance_rule_maintenance__rule_id__delete", "parameters": [{"required": true, 
"schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/topology": {"get": {"tags": ["topology"], "summary": "Get Topology Data", "description": "Get all topology data", "operationId": "get_topology_data_topology_get", "parameters": [{"required": false, "schema": {"type": "string", "title": "Provider Ids"}, "name": "provider_ids", "in": "query"}, {"required": false, "schema": {"type": "string", "title": "Services"}, "name": "services", "in": "query"}, {"required": false, "schema": {"type": "string", "title": "Environment"}, "name": "environment", "in": "query"}, {"required": false, "schema": {"type": "boolean", "title": "Include Empty Deps", "default": true}, "name": "include_empty_deps", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/TopologyServiceDtoOut"}, "type": "array", "title": "Response Get Topology Data Topology Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/topology/applications": {"get": {"tags": ["topology"], "summary": "Get Applications", "description": "Get all applications", "operationId": "get_applications_topology_applications_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/TopologyApplicationDtoOut"}, "type": "array", "title": "Response Get Applications Topology Applications Get"}}}}}, 
"security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["topology"], "summary": "Create Application", "description": "Create a new application", "operationId": "create_application_topology_applications_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/TopologyApplicationDtoIn"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/TopologyApplicationDtoOut"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/topology/applications/{application_id}": {"put": {"tags": ["topology"], "summary": "Update Application", "description": "Update an application", "operationId": "update_application_topology_applications__application_id__put", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Application Id"}, "name": "application_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/TopologyApplicationDtoIn"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/TopologyApplicationDtoOut"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["topology"], "summary": "Delete Application", "description": "Delete an application", "operationId": "delete_application_topology_applications__application_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Application Id"}, "name": 
"application_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/deduplications": {"get": {"tags": ["deduplications"], "summary": "Get Deduplications", "description": "Get Deduplications", "operationId": "get_deduplications_deduplications_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["deduplications"], "summary": "Create Deduplication Rule", "description": "Create Deduplication Rule", "operationId": "create_deduplication_rule_deduplications_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/DeduplicationRuleRequestDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/deduplications/fields": {"get": {"tags": ["deduplications"], "summary": "Get Deduplication Fields", "description": "Get Optional Fields For Deduplications", "operationId": "get_deduplication_fields_deduplications_fields_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"items": {"type": "string"}, "type": "array"}, "type": "object", "title": "Response Get Deduplication Fields Deduplications Fields Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, 
"/deduplications/{rule_id}": {"put": {"tags": ["deduplications"], "summary": "Update Deduplication Rule", "description": "Update Deduplication Rule", "operationId": "update_deduplication_rule_deduplications__rule_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/DeduplicationRuleRequestDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["deduplications"], "summary": "Delete Deduplication Rule", "description": "Delete Deduplication Rule", "operationId": "delete_deduplication_rule_deduplications__rule_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}}, "components": {"schemas": {"AlertActionType": {"enum": ["alert was triggered", "alert acknowledged", "alert automatically resolved", "alert automatically resolved by API", "alert manually resolved", "alert status manually changed", "alert status changed by API", "alert status undone", "alert enriched by workflow", "alert enriched by mapping rule", "alert was deduplicated", "alert was assigned with ticket", "alert was unassigned from ticket", "alert ticket was updated", "alert enrichments disposed", "alert deleted", 
"alert enriched", "alert un-enriched", "a comment was added to the alert", "a comment was removed from the alert", "Alert is in maintenance window", "A comment was added to the incident"], "title": "AlertActionType", "description": "An enumeration."}, "AlertAudit": {"properties": {"id": {"type": "string", "format": "uuid", "title": "Id"}, "fingerprint": {"type": "string", "title": "Fingerprint"}, "tenant_id": {"type": "string", "title": "Tenant Id"}, "timestamp": {"type": "string", "format": "date-time", "title": "Timestamp"}, "user_id": {"type": "string", "title": "User Id"}, "action": {"type": "string", "title": "Action"}, "description": {"type": "string", "title": "Description"}}, "type": "object", "required": ["fingerprint", "tenant_id", "user_id", "action", "description"], "title": "AlertAudit"}, "AlertAuditDto": {"properties": {"id": {"type": "string", "title": "Id"}, "timestamp": {"type": "string", "format": "date-time", "title": "Timestamp"}, "fingerprint": {"type": "string", "title": "Fingerprint"}, "action": {"$ref": "#/components/schemas/AlertActionType"}, "user_id": {"type": "string", "title": "User Id"}, "description": {"type": "string", "title": "Description"}}, "type": "object", "required": ["id", "timestamp", "fingerprint", "action", "user_id", "description"], "title": "AlertAuditDto"}, "AlertDto": {"properties": {"id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "status": {"$ref": "#/components/schemas/AlertStatus"}, "severity": {"$ref": "#/components/schemas/AlertSeverity"}, "lastReceived": {"type": "string", "title": "Lastreceived"}, "firingStartTime": {"type": "string", "title": "Firingstarttime"}, "environment": {"type": "string", "title": "Environment", "default": "undefined"}, "isFullDuplicate": {"type": "boolean", "title": "Isfullduplicate", "default": false}, "isPartialDuplicate": {"type": "boolean", "title": "Ispartialduplicate", "default": false}, "duplicateReason": {"type": "string", "title": 
"Duplicatereason"}, "service": {"type": "string", "title": "Service"}, "source": {"items": {"type": "string"}, "type": "array", "title": "Source", "default": []}, "apiKeyRef": {"type": "string", "title": "Apikeyref"}, "message": {"type": "string", "title": "Message"}, "description": {"type": "string", "title": "Description"}, "pushed": {"type": "boolean", "title": "Pushed", "default": false}, "event_id": {"type": "string", "title": "Event Id"}, "url": {"type": "string", "maxLength": 65536, "minLength": 1, "format": "uri", "title": "Url"}, "labels": {"type": "object", "title": "Labels", "default": {}}, "fingerprint": {"type": "string", "title": "Fingerprint"}, "deleted": {"type": "boolean", "title": "Deleted", "default": false}, "dismissUntil": {"type": "string", "title": "Dismissuntil"}, "dismissed": {"type": "boolean", "title": "Dismissed", "default": false}, "assignee": {"type": "string", "title": "Assignee"}, "providerId": {"type": "string", "title": "Providerid"}, "providerType": {"type": "string", "title": "Providertype"}, "note": {"type": "string", "title": "Note"}, "startedAt": {"type": "string", "title": "Startedat"}, "isNoisy": {"type": "boolean", "title": "Isnoisy", "default": false}, "enriched_fields": {"items": {}, "type": "array", "title": "Enriched Fields", "default": []}, "incident": {"type": "string", "title": "Incident"}}, "type": "object", "required": ["name", "status", "severity", "lastReceived"], "title": "AlertDto", "example": {"id": "1234", "name": "Pod 'api-service-production' lacks memory", "status": "firing", "lastReceived": "2021-01-01T00:00:00.000Z", "environment": "production", "service": "backend", "source": ["prometheus"], "message": "The pod 'api-service-production' lacks memory causing high error rate", "description": "Due to the lack of memory, the pod 'api-service-production' is experiencing high error rate", "severity": "critical", "pushed": true, "url": "https://www.keephq.dev?alertId=1234", "labels": {"pod": 
"api-service-production", "region": "us-east-1", "cpu": "88", "memory": "100Mi"}, "ticket_url": "https://www.keephq.dev?enrichedTicketId=456", "fingerprint": "1234"}}, "AlertSeverity": {"enum": ["critical", "high", "warning", "info", "low"], "title": "AlertSeverity", "description": "An enumeration."}, "AlertStatus": {"enum": ["firing", "resolved", "acknowledged", "suppressed", "pending"], "title": "AlertStatus", "description": "An enumeration."}, "AlertWithIncidentLinkMetadataDto": {"properties": {"id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "status": {"$ref": "#/components/schemas/AlertStatus"}, "severity": {"$ref": "#/components/schemas/AlertSeverity"}, "lastReceived": {"type": "string", "title": "Lastreceived"}, "firingStartTime": {"type": "string", "title": "Firingstarttime"}, "environment": {"type": "string", "title": "Environment", "default": "undefined"}, "isFullDuplicate": {"type": "boolean", "title": "Isfullduplicate", "default": false}, "isPartialDuplicate": {"type": "boolean", "title": "Ispartialduplicate", "default": false}, "duplicateReason": {"type": "string", "title": "Duplicatereason"}, "service": {"type": "string", "title": "Service"}, "source": {"items": {"type": "string"}, "type": "array", "title": "Source", "default": []}, "apiKeyRef": {"type": "string", "title": "Apikeyref"}, "message": {"type": "string", "title": "Message"}, "description": {"type": "string", "title": "Description"}, "pushed": {"type": "boolean", "title": "Pushed", "default": false}, "event_id": {"type": "string", "title": "Event Id"}, "url": {"type": "string", "maxLength": 65536, "minLength": 1, "format": "uri", "title": "Url"}, "labels": {"type": "object", "title": "Labels", "default": {}}, "fingerprint": {"type": "string", "title": "Fingerprint"}, "deleted": {"type": "boolean", "title": "Deleted", "default": false}, "dismissUntil": {"type": "string", "title": "Dismissuntil"}, "dismissed": {"type": "boolean", "title": "Dismissed", 
"default": false}, "assignee": {"type": "string", "title": "Assignee"}, "providerId": {"type": "string", "title": "Providerid"}, "providerType": {"type": "string", "title": "Providertype"}, "note": {"type": "string", "title": "Note"}, "startedAt": {"type": "string", "title": "Startedat"}, "isNoisy": {"type": "boolean", "title": "Isnoisy", "default": false}, "enriched_fields": {"items": {}, "type": "array", "title": "Enriched Fields", "default": []}, "incident": {"type": "string", "title": "Incident"}, "is_created_by_ai": {"type": "boolean", "title": "Is Created By Ai", "default": false}}, "type": "object", "required": ["name", "status", "severity", "lastReceived"], "title": "AlertWithIncidentLinkMetadataDto", "example": {"id": "1234", "name": "Pod 'api-service-production' lacks memory", "status": "firing", "lastReceived": "2021-01-01T00:00:00.000Z", "environment": "production", "service": "backend", "source": ["prometheus"], "message": "The pod 'api-service-production' lacks memory causing high error rate", "description": "Due to the lack of memory, the pod 'api-service-production' is experiencing high error rate", "severity": "critical", "pushed": true, "url": "https://www.keephq.dev?alertId=1234", "labels": {"pod": "api-service-production", "region": "us-east-1", "cpu": "88", "memory": "100Mi"}, "ticket_url": "https://www.keephq.dev?enrichedTicketId=456", "fingerprint": "1234"}}, "AlertWithIncidentLinkMetadataPaginatedResultsDto": {"properties": {"limit": {"type": "integer", "title": "Limit", "default": 25}, "offset": {"type": "integer", "title": "Offset", "default": 0}, "count": {"type": "integer", "title": "Count"}, "items": {"items": {"$ref": "#/components/schemas/AlertWithIncidentLinkMetadataDto"}, "type": "array", "title": "Items"}}, "type": "object", "required": ["count", "items"], "title": "AlertWithIncidentLinkMetadataPaginatedResultsDto"}, "Body_create_actions_actions_post": {"properties": {"file": {"type": "string", "format": "binary", "title": 
"File"}}, "type": "object", "title": "Body_create_actions_actions_post"}, "Body_create_workflow_workflows_post": {"properties": {"file": {"type": "string", "format": "binary", "title": "File"}}, "type": "object", "required": ["file"], "title": "Body_create_workflow_workflows_post"}, "Body_pusher_authentication_pusher_auth_post": {"properties": {"channel_name": {"title": "Channel Name"}, "socket_id": {"title": "Socket Id"}}, "type": "object", "required": ["channel_name", "socket_id"], "title": "Body_pusher_authentication_pusher_auth_post"}, "Body_put_action_actions__action_id__put": {"properties": {"file": {"type": "string", "format": "binary", "title": "File"}}, "type": "object", "required": ["file"], "title": "Body_put_action_actions__action_id__put"}, "Body_run_workflow_from_definition_workflows_test_post": {"properties": {"file": {"type": "string", "format": "binary", "title": "File"}}, "type": "object", "title": "Body_run_workflow_from_definition_workflows_test_post"}, "CreateOrUpdateGroupRequest": {"properties": {"name": {"type": "string", "title": "Name"}, "roles": {"items": {"type": "string"}, "type": "array", "title": "Roles"}, "members": {"items": {"type": "string"}, "type": "array", "title": "Members"}}, "type": "object", "required": ["name", "roles", "members"], "title": "CreateOrUpdateGroupRequest"}, "CreateOrUpdatePresetDto": {"properties": {"name": {"type": "string", "title": "Name"}, "options": {"items": {"$ref": "#/components/schemas/PresetOption"}, "type": "array", "title": "Options"}, "is_private": {"type": "boolean", "title": "Is Private", "default": false}, "is_noisy": {"type": "boolean", "title": "Is Noisy", "default": false}, "tags": {"items": {"$ref": "#/components/schemas/TagDto"}, "type": "array", "title": "Tags", "default": []}}, "type": "object", "required": ["options"], "title": "CreateOrUpdatePresetDto"}, "CreateOrUpdateRole": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": 
"Description"}, "scopes": {"items": {"type": "string"}, "type": "array", "uniqueItems": true, "title": "Scopes"}}, "type": "object", "title": "CreateOrUpdateRole"}, "CreatePresetTab": {"properties": {"name": {"type": "string", "title": "Name"}, "filter": {"type": "string", "title": "Filter"}}, "type": "object", "required": ["name", "filter"], "title": "CreatePresetTab"}, "CreateUserRequest": {"properties": {"username": {"type": "string", "title": "Username"}, "name": {"type": "string", "title": "Name"}, "password": {"type": "string", "title": "Password"}, "role": {"type": "string", "title": "Role"}, "groups": {"items": {"type": "string"}, "type": "array", "title": "Groups"}}, "type": "object", "required": ["username"], "title": "CreateUserRequest"}, "DashboardCreateDTO": {"properties": {"dashboard_name": {"type": "string", "title": "Dashboard Name"}, "dashboard_config": {"type": "object", "title": "Dashboard Config"}}, "type": "object", "required": ["dashboard_name", "dashboard_config"], "title": "DashboardCreateDTO"}, "DashboardResponseDTO": {"properties": {"id": {"type": "string", "title": "Id"}, "dashboard_name": {"type": "string", "title": "Dashboard Name"}, "dashboard_config": {"type": "object", "title": "Dashboard Config"}, "created_at": {"type": "string", "format": "date-time", "title": "Created At"}, "updated_at": {"type": "string", "format": "date-time", "title": "Updated At"}}, "type": "object", "required": ["id", "dashboard_name", "dashboard_config", "created_at", "updated_at"], "title": "DashboardResponseDTO"}, "DashboardUpdateDTO": {"properties": {"dashboard_config": {"type": "object", "title": "Dashboard Config"}, "dashboard_name": {"type": "string", "title": "Dashboard Name"}}, "type": "object", "title": "DashboardUpdateDTO"}, "DeduplicationRuleRequestDto": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "provider_type": {"type": "string", "title": "Provider Type"}, 
"provider_id": {"type": "string", "title": "Provider Id"}, "fingerprint_fields": {"items": {"type": "string"}, "type": "array", "title": "Fingerprint Fields"}, "full_deduplication": {"type": "boolean", "title": "Full Deduplication", "default": false}, "ignore_fields": {"items": {"type": "string"}, "type": "array", "title": "Ignore Fields"}}, "type": "object", "required": ["name", "provider_type", "fingerprint_fields"], "title": "DeduplicationRuleRequestDto"}, "DeleteRequestBody": {"properties": {"fingerprint": {"type": "string", "title": "Fingerprint"}, "lastReceived": {"type": "string", "title": "Lastreceived"}, "restore": {"type": "boolean", "title": "Restore", "default": false}}, "type": "object", "required": ["fingerprint", "lastReceived"], "title": "DeleteRequestBody"}, "EnrichAlertRequestBody": {"properties": {"enrichments": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Enrichments"}, "fingerprint": {"type": "string", "title": "Fingerprint"}}, "type": "object", "required": ["enrichments", "fingerprint"], "title": "EnrichAlertRequestBody"}, "ExtractionRuleDtoBase": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "priority": {"type": "integer", "title": "Priority", "default": 0}, "attribute": {"type": "string", "title": "Attribute"}, "condition": {"type": "string", "title": "Condition"}, "disabled": {"type": "boolean", "title": "Disabled", "default": false}, "regex": {"type": "string", "title": "Regex"}, "pre": {"type": "boolean", "title": "Pre", "default": false}}, "type": "object", "required": ["name", "regex"], "title": "ExtractionRuleDtoBase"}, "ExtractionRuleDtoOut": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "priority": {"type": "integer", "title": "Priority", "default": 0}, "attribute": {"type": "string", "title": "Attribute"}, "condition": {"type": "string", "title": "Condition"}, 
"disabled": {"type": "boolean", "title": "Disabled", "default": false}, "regex": {"type": "string", "title": "Regex"}, "pre": {"type": "boolean", "title": "Pre", "default": false}, "id": {"type": "integer", "title": "Id"}, "created_by": {"type": "string", "title": "Created By"}, "created_at": {"type": "string", "format": "date-time", "title": "Created At"}, "updated_by": {"type": "string", "title": "Updated By"}, "updated_at": {"type": "string", "format": "date-time", "title": "Updated At"}}, "type": "object", "required": ["name", "regex", "id", "created_at"], "title": "ExtractionRuleDtoOut"}, "Group": {"properties": {"id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "roles": {"items": {"type": "string"}, "type": "array", "title": "Roles", "default": []}, "members": {"items": {"type": "string"}, "type": "array", "title": "Members", "default": []}, "memberCount": {"type": "integer", "title": "Membercount", "default": 0}}, "type": "object", "required": ["id", "name"], "title": "Group"}, "HTTPValidationError": {"properties": {"detail": {"items": {"$ref": "#/components/schemas/ValidationError"}, "type": "array", "title": "Detail"}}, "type": "object", "title": "HTTPValidationError"}, "IncidentCommit": {"properties": {"accepted": {"type": "boolean", "title": "Accepted"}, "original_suggestion": {"type": "object", "title": "Original Suggestion"}, "changes": {"type": "object", "title": "Changes"}, "incident": {"$ref": "#/components/schemas/IncidentDto"}}, "type": "object", "required": ["accepted", "original_suggestion", "incident"], "title": "IncidentCommit"}, "IncidentDto": {"properties": {"user_generated_name": {"type": "string", "title": "User Generated Name"}, "assignee": {"type": "string", "title": "Assignee"}, "user_summary": {"type": "string", "title": "User Summary"}, "same_incident_in_the_past_id": {"type": "string", "format": "uuid", "title": "Same Incident In The Past Id"}, "id": {"type": "string", "format": "uuid", "title": 
"Id"}, "start_time": {"type": "string", "format": "date-time", "title": "Start Time"}, "last_seen_time": {"type": "string", "format": "date-time", "title": "Last Seen Time"}, "end_time": {"type": "string", "format": "date-time", "title": "End Time"}, "creation_time": {"type": "string", "format": "date-time", "title": "Creation Time"}, "alerts_count": {"type": "integer", "title": "Alerts Count"}, "alert_sources": {"items": {"type": "string"}, "type": "array", "title": "Alert Sources"}, "severity": {"$ref": "#/components/schemas/IncidentSeverity"}, "status": {"allOf": [{"$ref": "#/components/schemas/IncidentStatus"}], "default": "firing"}, "services": {"items": {"type": "string"}, "type": "array", "title": "Services"}, "is_predicted": {"type": "boolean", "title": "Is Predicted"}, "is_confirmed": {"type": "boolean", "title": "Is Confirmed"}, "generated_summary": {"type": "string", "title": "Generated Summary"}, "ai_generated_name": {"type": "string", "title": "Ai Generated Name"}, "rule_fingerprint": {"type": "string", "title": "Rule Fingerprint"}, "fingerprint": {"type": "string", "title": "Fingerprint"}, "merged_into_incident_id": {"type": "string", "format": "uuid", "title": "Merged Into Incident Id"}, "merged_by": {"type": "string", "title": "Merged By"}, "merged_at": {"type": "string", "format": "date-time", "title": "Merged At"}}, "type": "object", "required": ["id", "alerts_count", "alert_sources", "severity", "services", "is_predicted", "is_confirmed"], "title": "IncidentDto", "example": {"id": "c2509cb3-6168-4347-b83b-a41da9df2d5b", "name": "Incident name", "user_summary": "Keep: Incident description", "status": "firing"}}, "IncidentDtoIn": {"properties": {"user_generated_name": {"type": "string", "title": "User Generated Name"}, "assignee": {"type": "string", "title": "Assignee"}, "user_summary": {"type": "string", "title": "User Summary"}, "same_incident_in_the_past_id": {"type": "string", "format": "uuid", "title": "Same Incident In The Past Id"}}, "type": 
"object", "title": "IncidentDtoIn", "example": {"id": "c2509cb3-6168-4347-b83b-a41da9df2d5b", "name": "Incident name", "user_summary": "Keep: Incident description", "status": "firing"}}, "IncidentListFilterParamsDto": {"properties": {"statuses": {"items": {"$ref": "#/components/schemas/IncidentStatus"}, "type": "array", "default": ["firing", "resolved", "acknowledged", "merged"]}, "severities": {"items": {"$ref": "#/components/schemas/IncidentSeverity"}, "type": "array", "default": ["critical", "high", "warning", "info", "low"]}, "assignees": {"items": {"type": "string"}, "type": "array", "title": "Assignees"}, "services": {"items": {"type": "string"}, "type": "array", "title": "Services"}, "sources": {"items": {"type": "string"}, "type": "array", "title": "Sources"}}, "type": "object", "required": ["assignees", "services", "sources"], "title": "IncidentListFilterParamsDto"}, "IncidentSeverity": {"enum": ["critical", "high", "warning", "info", "low"], "title": "IncidentSeverity", "description": "An enumeration."}, "IncidentSorting": {"enum": ["creation_time", "start_time", "last_seen_time", "severity", "status", "alerts_count", "-creation_time", "-start_time", "-last_seen_time", "-severity", "-status", "-alerts_count"], "title": "IncidentSorting", "description": "An enumeration."}, "IncidentStatus": {"enum": ["firing", "resolved", "acknowledged", "merged"], "title": "IncidentStatus", "description": "An enumeration."}, "IncidentStatusChangeDto": {"properties": {"status": {"$ref": "#/components/schemas/IncidentStatus"}, "comment": {"type": "string", "title": "Comment"}}, "type": "object", "required": ["status"], "title": "IncidentStatusChangeDto"}, "IncidentsClusteringSuggestion": {"properties": {"incident_suggestion": {"items": {"$ref": "#/components/schemas/IncidentDto"}, "type": "array", "title": "Incident Suggestion"}, "suggestion_id": {"type": "string", "title": "Suggestion Id"}}, "type": "object", "required": ["incident_suggestion", "suggestion_id"], "title": 
"IncidentsClusteringSuggestion"}, "IncidentsPaginatedResultsDto": {"properties": {"limit": {"type": "integer", "title": "Limit", "default": 25}, "offset": {"type": "integer", "title": "Offset", "default": 0}, "count": {"type": "integer", "title": "Count"}, "items": {"items": {"$ref": "#/components/schemas/IncidentDto"}, "type": "array", "title": "Items"}}, "type": "object", "required": ["count", "items"], "title": "IncidentsPaginatedResultsDto"}, "MaintenanceRuleCreate": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "cel_query": {"type": "string", "title": "Cel Query"}, "start_time": {"type": "string", "format": "date-time", "title": "Start Time"}, "duration_seconds": {"type": "integer", "title": "Duration Seconds"}, "suppress": {"type": "boolean", "title": "Suppress", "default": false}, "enabled": {"type": "boolean", "title": "Enabled", "default": true}}, "type": "object", "required": ["name", "cel_query", "start_time"], "title": "MaintenanceRuleCreate"}, "MaintenanceRuleRead": {"properties": {"id": {"type": "integer", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "created_by": {"type": "string", "title": "Created By"}, "cel_query": {"type": "string", "title": "Cel Query"}, "start_time": {"type": "string", "format": "date-time", "title": "Start Time"}, "end_time": {"type": "string", "format": "date-time", "title": "End Time"}, "duration_seconds": {"type": "integer", "title": "Duration Seconds"}, "updated_at": {"type": "string", "format": "date-time", "title": "Updated At"}, "suppress": {"type": "boolean", "title": "Suppress", "default": false}, "enabled": {"type": "boolean", "title": "Enabled", "default": true}}, "type": "object", "required": ["id", "name", "created_by", "cel_query", "start_time", "end_time"], "title": "MaintenanceRuleRead"}, "MappingRule": {"properties": {"id": {"type": "integer", "title": "Id"}, 
"tenant_id": {"type": "string", "title": "Tenant Id"}, "priority": {"type": "integer", "title": "Priority", "default": 0}, "name": {"type": "string", "maxLength": 255, "title": "Name"}, "description": {"type": "string", "maxLength": 2048, "title": "Description"}, "file_name": {"type": "string", "maxLength": 255, "title": "File Name"}, "created_by": {"type": "string", "maxLength": 255, "title": "Created By"}, "created_at": {"type": "string", "format": "date-time", "title": "Created At"}, "disabled": {"type": "boolean", "title": "Disabled", "default": false}, "override": {"type": "boolean", "title": "Override", "default": true}, "condition": {"type": "string", "maxLength": 2000, "title": "Condition"}, "type": {"type": "string", "maxLength": 255, "title": "Type"}, "matchers": {"items": {"type": "string"}, "type": "array", "title": "Matchers"}, "rows": {"items": {"type": "object"}, "type": "array", "title": "Rows"}, "updated_by": {"type": "string", "maxLength": 255, "title": "Updated By"}, "last_updated_at": {"type": "string", "format": "date-time", "title": "Last Updated At"}}, "type": "object", "required": ["tenant_id", "name", "type", "matchers"], "title": "MappingRule"}, "MappingRuleDtoIn": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "file_name": {"type": "string", "title": "File Name"}, "priority": {"type": "integer", "title": "Priority", "default": 0}, "matchers": {"items": {"type": "string"}, "type": "array", "title": "Matchers"}, "type": {"type": "string", "enum": ["csv", "topology"], "title": "Type", "default": "csv"}, "rows": {"items": {"type": "object"}, "type": "array", "title": "Rows"}}, "type": "object", "required": ["name", "matchers"], "title": "MappingRuleDtoIn"}, "MappingRuleDtoOut": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "file_name": {"type": "string", "title": "File Name"}, "priority": {"type": 
"integer", "title": "Priority", "default": 0}, "matchers": {"items": {"type": "string"}, "type": "array", "title": "Matchers"}, "type": {"type": "string", "enum": ["csv", "topology"], "title": "Type", "default": "csv"}, "id": {"type": "integer", "title": "Id"}, "created_by": {"type": "string", "title": "Created By"}, "created_at": {"type": "string", "format": "date-time", "title": "Created At"}, "attributes": {"items": {"type": "string"}, "type": "array", "title": "Attributes", "default": []}, "updated_by": {"type": "string", "title": "Updated By"}, "last_updated_at": {"type": "string", "format": "date-time", "title": "Last Updated At"}}, "type": "object", "required": ["name", "matchers", "id", "created_at"], "title": "MappingRuleDtoOut"}, "MergeIncidentsRequestDto": {"properties": {"source_incident_ids": {"items": {"type": "string", "format": "uuid"}, "type": "array", "title": "Source Incident Ids"}, "destination_incident_id": {"type": "string", "format": "uuid", "title": "Destination Incident Id"}}, "type": "object", "required": ["source_incident_ids", "destination_incident_id"], "title": "MergeIncidentsRequestDto"}, "MergeIncidentsResponseDto": {"properties": {"merged_incident_ids": {"items": {"type": "string", "format": "uuid"}, "type": "array", "title": "Merged Incident Ids"}, "skipped_incident_ids": {"items": {"type": "string", "format": "uuid"}, "type": "array", "title": "Skipped Incident Ids"}, "failed_incident_ids": {"items": {"type": "string", "format": "uuid"}, "type": "array", "title": "Failed Incident Ids"}, "destination_incident_id": {"type": "string", "format": "uuid", "title": "Destination Incident Id"}, "message": {"type": "string", "title": "Message"}}, "type": "object", "required": ["merged_incident_ids", "skipped_incident_ids", "failed_incident_ids", "destination_incident_id", "message"], "title": "MergeIncidentsResponseDto"}, "PermissionEntity": {"properties": {"id": {"type": "string", "title": "Id"}, "type": {"type": "string", "title": 
"Type"}, "name": {"type": "string", "title": "Name"}}, "type": "object", "required": ["id", "type"], "title": "PermissionEntity"}, "PresetDto": {"properties": {"id": {"type": "string", "format": "uuid", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "options": {"items": {}, "type": "array", "title": "Options", "default": []}, "created_by": {"type": "string", "title": "Created By"}, "is_private": {"type": "boolean", "title": "Is Private", "default": false}, "is_noisy": {"type": "boolean", "title": "Is Noisy", "default": false}, "should_do_noise_now": {"type": "boolean", "title": "Should Do Noise Now", "default": false}, "alerts_count": {"type": "integer", "title": "Alerts Count", "default": 0}, "static": {"type": "boolean", "title": "Static", "default": false}, "tags": {"items": {"$ref": "#/components/schemas/TagDto"}, "type": "array", "title": "Tags", "default": []}}, "type": "object", "required": ["id", "name"], "title": "PresetDto"}, "PresetOption": {"properties": {"label": {"type": "string", "title": "Label"}, "value": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Value"}}, "type": "object", "required": ["label", "value"], "title": "PresetOption"}, "PresetSearchQuery": {"properties": {"cel_query": {"type": "string", "minLength": 0, "title": "Cel Query"}, "sql_query": {"type": "object", "title": "Sql Query"}, "limit": {"type": "integer", "minimum": 0.0, "title": "Limit", "default": 1000}, "timeframe": {"type": "integer", "minimum": 0.0, "title": "Timeframe", "default": 0}}, "type": "object", "required": ["cel_query", "sql_query"], "title": "PresetSearchQuery"}, "ProviderDTO": {"properties": {"type": {"type": "string", "title": "Type"}, "id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "installed": {"type": "boolean", "title": "Installed"}}, "type": "object", "required": ["type", "name", "installed"], "title": "ProviderDTO"}, "ProviderWebhookSettings": {"properties": {"webhookDescription": {"type": 
"string", "title": "Webhookdescription"}, "webhookTemplate": {"type": "string", "title": "Webhooktemplate"}, "webhookMarkdown": {"type": "string", "title": "Webhookmarkdown"}}, "type": "object", "required": ["webhookTemplate"], "title": "ProviderWebhookSettings"}, "ResourcePermission": {"properties": {"resource_id": {"type": "string", "title": "Resource Id"}, "resource_name": {"type": "string", "title": "Resource Name"}, "resource_type": {"type": "string", "title": "Resource Type"}, "permissions": {"items": {"$ref": "#/components/schemas/PermissionEntity"}, "type": "array", "title": "Permissions"}}, "type": "object", "required": ["resource_id", "resource_name", "resource_type", "permissions"], "title": "ResourcePermission"}, "Role": {"properties": {"id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "scopes": {"items": {"type": "string"}, "type": "array", "uniqueItems": true, "title": "Scopes"}, "predefined": {"type": "boolean", "title": "Predefined", "default": true}}, "type": "object", "required": ["id", "name", "description", "scopes"], "title": "Role"}, "RuleCreateDto": {"properties": {"ruleName": {"type": "string", "title": "Rulename"}, "sqlQuery": {"type": "object", "title": "Sqlquery"}, "celQuery": {"type": "string", "title": "Celquery"}, "timeframeInSeconds": {"type": "integer", "title": "Timeframeinseconds"}, "timeUnit": {"type": "string", "title": "Timeunit"}, "groupingCriteria": {"items": {}, "type": "array", "title": "Groupingcriteria", "default": []}, "groupDescription": {"type": "string", "title": "Groupdescription"}, "requireApprove": {"type": "boolean", "title": "Requireapprove", "default": false}, "resolveOn": {"type": "string", "title": "Resolveon", "default": "never"}}, "type": "object", "required": ["ruleName", "sqlQuery", "celQuery", "timeframeInSeconds", "timeUnit"], "title": "RuleCreateDto"}, "SMTPSettings": {"properties": {"host": {"type": "string", 
"title": "Host"}, "port": {"type": "integer", "title": "Port"}, "from_email": {"type": "string", "title": "From Email"}, "username": {"type": "string", "title": "Username"}, "password": {"type": "string", "format": "password", "title": "Password", "writeOnly": true}, "secure": {"type": "boolean", "title": "Secure", "default": true}, "to_email": {"type": "string", "title": "To Email", "default": "keep@example.com"}}, "type": "object", "required": ["host", "port", "from_email"], "title": "SMTPSettings", "example": {"host": "smtp.example.com", "port": 587, "username": "user@example.com", "password": "password", "secure": true, "from_email": "noreply@example.com", "to_email": ""}}, "SearchAlertsRequest": {"properties": {"query": {"$ref": "#/components/schemas/PresetSearchQuery"}, "timeframe": {"type": "integer", "title": "Timeframe"}}, "type": "object", "required": ["query", "timeframe"], "title": "SearchAlertsRequest"}, "TagDto": {"properties": {"id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name"}}, "type": "object", "required": ["name"], "title": "TagDto"}, "TopologyApplicationDtoIn": {"properties": {"id": {"type": "string", "format": "uuid", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "services": {"items": {"$ref": "#/components/schemas/TopologyServiceDtoIn"}, "type": "array", "title": "Services", "default": []}}, "type": "object", "required": ["name"], "title": "TopologyApplicationDtoIn"}, "TopologyApplicationDtoOut": {"properties": {"id": {"type": "string", "format": "uuid", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "services": {"items": {"$ref": "#/components/schemas/TopologyApplicationServiceDto"}, "type": "array", "title": "Services", "default": []}}, "type": "object", "required": ["id", "name"], "title": "TopologyApplicationDtoOut"}, "TopologyApplicationServiceDto": 
{"properties": {"id": {"type": "integer", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "service": {"type": "string", "title": "Service"}}, "type": "object", "required": ["id", "name", "service"], "title": "TopologyApplicationServiceDto"}, "TopologyServiceDependencyDto": {"properties": {"serviceId": {"type": "integer", "title": "Serviceid"}, "serviceName": {"type": "string", "title": "Servicename"}, "protocol": {"type": "string", "title": "Protocol", "default": "unknown"}}, "type": "object", "required": ["serviceId", "serviceName"], "title": "TopologyServiceDependencyDto"}, "TopologyServiceDtoIn": {"properties": {"id": {"type": "integer", "title": "Id"}}, "type": "object", "required": ["id"], "title": "TopologyServiceDtoIn"}, "TopologyServiceDtoOut": {"properties": {"source_provider_id": {"type": "string", "title": "Source Provider Id"}, "repository": {"type": "string", "title": "Repository"}, "tags": {"items": {"type": "string"}, "type": "array", "title": "Tags"}, "service": {"type": "string", "title": "Service"}, "display_name": {"type": "string", "title": "Display Name"}, "environment": {"type": "string", "title": "Environment", "default": "unknown"}, "description": {"type": "string", "title": "Description"}, "team": {"type": "string", "title": "Team"}, "email": {"type": "string", "title": "Email"}, "slack": {"type": "string", "title": "Slack"}, "ip_address": {"type": "string", "title": "Ip Address"}, "mac_address": {"type": "string", "title": "Mac Address"}, "category": {"type": "string", "title": "Category"}, "manufacturer": {"type": "string", "title": "Manufacturer"}, "id": {"type": "integer", "title": "Id"}, "dependencies": {"items": {"$ref": "#/components/schemas/TopologyServiceDependencyDto"}, "type": "array", "title": "Dependencies"}, "application_ids": {"items": {"type": "string", "format": "uuid"}, "type": "array", "title": "Application Ids"}, "updated_at": {"type": "string", "format": "date-time", "title": "Updated At"}}, "type": 
"object", "required": ["service", "display_name", "id", "dependencies", "application_ids"], "title": "TopologyServiceDtoOut"}, "UnEnrichAlertRequestBody": {"properties": {"enrichments": {"items": {"type": "string"}, "type": "array", "title": "Enrichments"}, "fingerprint": {"type": "string", "title": "Fingerprint"}}, "type": "object", "required": ["enrichments", "fingerprint"], "title": "UnEnrichAlertRequestBody"}, "UpdateUserRequest": {"properties": {"username": {"type": "string", "title": "Username"}, "password": {"type": "string", "title": "Password"}, "role": {"type": "string", "title": "Role"}, "groups": {"items": {"type": "string"}, "type": "array", "title": "Groups"}}, "type": "object", "title": "UpdateUserRequest"}, "User": {"properties": {"email": {"type": "string", "title": "Email"}, "name": {"type": "string", "title": "Name"}, "role": {"type": "string", "title": "Role"}, "picture": {"type": "string", "title": "Picture"}, "created_at": {"type": "string", "title": "Created At"}, "last_login": {"type": "string", "title": "Last Login"}, "ldap": {"type": "boolean", "title": "Ldap", "default": false}, "groups": {"items": {"$ref": "#/components/schemas/Group"}, "type": "array", "title": "Groups", "default": []}}, "type": "object", "required": ["email", "name", "created_at"], "title": "User"}, "ValidationError": {"properties": {"loc": {"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}, "type": "array", "title": "Location"}, "msg": {"type": "string", "title": "Message"}, "type": {"type": "string", "title": "Error Type"}}, "type": "object", "required": ["loc", "msg", "type"], "title": "ValidationError"}, "WebhookSettings": {"properties": {"webhookApi": {"type": "string", "title": "Webhookapi"}, "apiKey": {"type": "string", "title": "Apikey"}, "modelSchema": {"type": "object", "title": "Modelschema"}}, "type": "object", "required": ["webhookApi", "apiKey", "modelSchema"], "title": "WebhookSettings"}, "WorkflowCreateOrUpdateDTO": {"properties": 
{"workflow_id": {"type": "string", "title": "Workflow Id"}, "status": {"type": "string", "enum": ["created", "updated"], "title": "Status"}, "revision": {"type": "integer", "title": "Revision", "default": 1}}, "type": "object", "required": ["workflow_id", "status"], "title": "WorkflowCreateOrUpdateDTO"}, "WorkflowDTO": {"properties": {"id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name", "default": "Workflow file doesn't contain name"}, "description": {"type": "string", "title": "Description", "default": "Workflow file doesn't contain description"}, "created_by": {"type": "string", "title": "Created By"}, "creation_time": {"type": "string", "format": "date-time", "title": "Creation Time"}, "triggers": {"items": {"type": "object"}, "type": "array", "title": "Triggers"}, "interval": {"type": "integer", "title": "Interval"}, "disabled": {"type": "boolean", "title": "Disabled", "default": false}, "last_execution_time": {"type": "string", "format": "date-time", "title": "Last Execution Time"}, "last_execution_status": {"type": "string", "title": "Last Execution Status"}, "providers": {"items": {"$ref": "#/components/schemas/ProviderDTO"}, "type": "array", "title": "Providers"}, "workflow_raw": {"type": "string", "title": "Workflow Raw"}, "revision": {"type": "integer", "title": "Revision", "default": 1}, "last_updated": {"type": "string", "format": "date-time", "title": "Last Updated"}, "invalid": {"type": "boolean", "title": "Invalid", "default": false}, "last_executions": {"items": {"type": "object"}, "type": "array", "title": "Last Executions"}, "last_execution_started": {"type": "string", "format": "date-time", "title": "Last Execution Started"}, "provisioned": {"type": "boolean", "title": "Provisioned", "default": false}, "provisioned_file": {"type": "string", "title": "Provisioned File"}}, "type": "object", "required": ["id", "created_by", "creation_time", "providers", "workflow_raw"], "title": "WorkflowDTO"}, "WorkflowExecutionDTO": 
{"properties": {"id": {"type": "string", "title": "Id"}, "workflow_id": {"type": "string", "title": "Workflow Id"}, "started": {"type": "string", "format": "date-time", "title": "Started"}, "triggered_by": {"type": "string", "title": "Triggered By"}, "status": {"type": "string", "title": "Status"}, "workflow_name": {"type": "string", "title": "Workflow Name"}, "logs": {"items": {"$ref": "#/components/schemas/WorkflowExecutionLogsDTO"}, "type": "array", "title": "Logs"}, "error": {"type": "string", "title": "Error"}, "execution_time": {"type": "number", "title": "Execution Time"}, "results": {"type": "object", "title": "Results"}}, "type": "object", "required": ["id", "workflow_id", "started", "triggered_by", "status"], "title": "WorkflowExecutionDTO"}, "WorkflowExecutionLogsDTO": {"properties": {"id": {"type": "integer", "title": "Id"}, "timestamp": {"type": "string", "format": "date-time", "title": "Timestamp"}, "message": {"type": "string", "title": "Message"}, "context": {"type": "object", "title": "Context"}}, "type": "object", "required": ["id", "timestamp", "message"], "title": "WorkflowExecutionLogsDTO"}, "WorkflowExecutionsPaginatedResultsDto": {"properties": {"limit": {"type": "integer", "title": "Limit", "default": 25}, "offset": {"type": "integer", "title": "Offset", "default": 0}, "count": {"type": "integer", "title": "Count"}, "items": {"items": {"$ref": "#/components/schemas/WorkflowExecutionDTO"}, "type": "array", "title": "Items"}, "passCount": {"type": "integer", "title": "Passcount", "default": 0}, "avgDuration": {"type": "number", "title": "Avgduration", "default": 0.0}, "workflow": {"$ref": "#/components/schemas/WorkflowDTO"}, "failCount": {"type": "integer", "title": "Failcount", "default": 0}}, "type": "object", "required": ["count", "items"], "title": "WorkflowExecutionsPaginatedResultsDto"}, "WorkflowToAlertExecutionDTO": {"properties": {"workflow_id": {"type": "string", "title": "Workflow Id"}, "workflow_execution_id": {"type": "string", 
"title": "Workflow Execution Id"}, "alert_fingerprint": {"type": "string", "title": "Alert Fingerprint"}, "workflow_status": {"type": "string", "title": "Workflow Status"}, "workflow_started": {"type": "string", "format": "date-time", "title": "Workflow Started"}}, "type": "object", "required": ["workflow_id", "workflow_execution_id", "alert_fingerprint", "workflow_status", "workflow_started"], "title": "WorkflowToAlertExecutionDTO"}}, "securitySchemes": {"API Key": {"type": "apiKey", "in": "header", "name": "X-API-KEY"}, "HTTPBasic": {"type": "http", "scheme": "basic"}, "OAuth2PasswordBearer": {"type": "oauth2", "flows": {"password": {"scopes": {}, "tokenUrl": "token"}}}}}} \ No newline at end of file diff --git a/docs/overview/ai-correlation.mdx b/docs/overview/ai-correlation.mdx new file mode 100644 index 0000000000..6ef25f0039 --- /dev/null +++ b/docs/overview/ai-correlation.mdx @@ -0,0 +1,37 @@ +--- +title: "AI Correlation" +--- + + +Keep Cloud: ✅
+Keep Enterprise On-Premises: ✅
+Keep Open Source: ⛔️ +
+ +Keep's AI correlation engine provides a distinctive approach to fully AI-driven alert correlation. +By using historical alert data as its training dataset, the system intelligently classifies new alerts and assigns them to appropriate incidents. + +The AI correlator runs in cycles; each iteration cycle completes in 5-15 minutes: +1) The model is trained on historical data. +2) The model is evaluated. +3) All unassigned alerts are clustered and added to incidents when their confidence score exceeds the threshold. + +Configuration UI: + + + + +Incident with alerts correlated by AI: + + + + +Check the demo on a playground: https://playground.keephq.dev/ai + +To activate the feature for your on-premises tenant, please [talk to us](https://www.keephq.dev/meet-keep). + +## Frequent questions: + +**Model used:** proprietary model developed and hosted by Keep.
+**Training dataset:** tenant's alerts and incidents.
+**Privacy:** tenant's data is used only for training the model for that same tenant. Data is not mixed between tenants for training. \ No newline at end of file diff --git a/docs/overview/ai-in-workflows.mdx b/docs/overview/ai-in-workflows.mdx new file mode 100644 index 0000000000..0352d7302c --- /dev/null +++ b/docs/overview/ai-in-workflows.mdx @@ -0,0 +1,30 @@ +--- +title: "AI in Workflows" +--- + + +Keep Cloud: ✅
+Keep Enterprise On-Premises: ✅
+Keep Open Source: ✅ +
+ + + + + +AI in workflows enables you to integrate third-party AI providers as "steps" and "actions" within your workflows. + +Could be useful for: +1. Human input normalization. +2. Routing. +3. Severity definition. +4. Summorization. + +Supported providers include DeepSeek, OpenAI, Anthropic, Grok, Gemini, Ollama, Llama.cpp, vLLM, and more. Check the "AI" filter on the "Providers" page for a complete list. + +Blogpost with examples: https://www.keephq.dev/blog/launch-week-ai-powered-workflows + +## Frequent questions: + +**Model used:** client's own 3'rd party LLM provider. Could be cloud or self-hosted.
+**Privacy:** Data stays within Keep unless it's explicitly processed via a workflow to an explicitly connected 3rd-party provider. Data flow is defined by the user. \ No newline at end of file diff --git a/docs/overview/ai-incident-assistant.mdx b/docs/overview/ai-incident-assistant.mdx new file mode 100644 index 0000000000..b01c5c8c94 --- /dev/null +++ b/docs/overview/ai-incident-assistant.mdx @@ -0,0 +1,23 @@ +--- +title: "AI Incident Assistant" +--- + + +Keep Cloud: ✅
+Keep Enterprise On-Premises: ✅
+Keep Open Source: (experimental) +
+ +The AI incident assistant is a chat feature embedded in the incident page. It streamlines all incident context—including +alerts, descriptions, and impacted topology—to the LLM, helping on-call engineers gather information faster and +resolve incidents more efficiently. Users can ask for root cause analysis and even execute commands on third-party +services ([read more about provider methods](/providers/provider-methods#via-ai-assistant)). + + + + + +## Frequent questions: + +**Model used:** OpenAI, a model hosted by Keep, or other.
+**Data flow:** Data is shared between the LLM provider and Keep; the LLM provider may vary depending on the contract. \ No newline at end of file diff --git a/docs/overview/ai-semi-automatic-correlation.mdx b/docs/overview/ai-semi-automatic-correlation.mdx new file mode 100644 index 0000000000..e6d585bc76 --- /dev/null +++ b/docs/overview/ai-semi-automatic-correlation.mdx @@ -0,0 +1,28 @@ +--- +title: "AI Semi Automatic Correlation" +--- + + +Keep Cloud: ✅
+Keep Enterprise On-Premises: ✅
+Keep Open Source: (experimental) +
+ +The Semi-Automatic Incident Engine is a powerful tool designed for teams handling a moderate volume of alerts (fewer than 100 per day). It helps you quickly identify critical issues among numerous alerts—finding the needle in the haystack. + +How to use: + +1. Navigate to the Feed section +2. Select a few alerts +3. Click the "Create Incidents With AI" button + +Once activated, the system will process your alerts through its LLM (Large Language Model) and present you with potential incident candidates for review. + + + + + +## Frequent questions: + +**Model used:** OpenAI, a model hosted by Keep, or other.
+**Data flow:** Data is shared between the LLM provider and Keep; the LLM provider may vary depending on the contract. \ No newline at end of file diff --git a/docs/overview/ai-workflow-assistant.mdx b/docs/overview/ai-workflow-assistant.mdx new file mode 100644 index 0000000000..0b273d079c --- /dev/null +++ b/docs/overview/ai-workflow-assistant.mdx @@ -0,0 +1,31 @@ +--- +title: "AI Workflow Builder Assistant" +--- + + +Keep Cloud: ✅
+Keep Enterprise On-Premises: ✅
+Keep Open Source: (experimental) +
+ + + + + +AI-driven workflow builder (don't confuse it with [AI in workflows](./ai-in-workflows)) is a chat-like UI to build workflows using natural language. +It works in the “human in the loop” paradigm, proposing changes and applying them only after the user's explicit consent. +It simplifies workflow-building routines and helps a broader group of engineers within the organization adopt workflows. + + +Go to "Workflows" -> "+ Create Workflow" to find the AI Assistant: + + + + + +Launch Blogpost: https://www.keephq.dev/blog/launch-week-ai-workflow-builder + +## Frequent questions: + +**Model used:** OpenAI, a model hosted by Keep, or other.
+**Data flow:** Data is shared between LLM provider and Keep whether the LLM provider may vary depending on the contract. \ No newline at end of file diff --git a/docs/platform/alertseverityandstatus.mdx b/docs/overview/alertseverityandstatus.mdx similarity index 87% rename from docs/platform/alertseverityandstatus.mdx rename to docs/overview/alertseverityandstatus.mdx index 6f3bee2b5b..d994e49ac1 100644 --- a/docs/platform/alertseverityandstatus.mdx +++ b/docs/overview/alertseverityandstatus.mdx @@ -4,27 +4,28 @@ title: "Alerts Severity and Status" In Keep, alerts are treated as first-class citizens, with clearly defined severities and statuses to aid in quick and efficient response. + ## Alert Severity Alert severity in Keep is classified into five categories, helping teams prioritize their response based on the urgency and impact of the alert. -| Severity Level | Description | -|----------------|-------------------------------------------------------| -| CRITICAL | Requires immediate action. | -| HIGH | Needs to be addressed soon. | -| WARNING | Indicates a potential problem. | -| INFO | Provides information, no immediate action required. | -| LOW | Minor issues or lowest priority. +| Severity Level | Description | Expected Value | +|----------------|-------------------------------------------------------|----------------| +| CRITICAL | Requires immediate action. | "critical" | +| HIGH | Needs to be addressed soon. | "high" | +| WARNING | Indicates a potential problem. | "warning" | +| INFO | Provides information, no immediate action required. | "info" | +| LOW | Minor issues or lowest priority. | "low" | ## Alert Status The status of an alert in Keep reflects its current state in the alert lifecycle. -| Status | Description | -|--------------|-----------------------------------------------------------------------------| -| FIRING | Active alert indicating an ongoing issue. | -| RESOLVED | The issue has been resolved, and the alert is no longer active. 
| -| ACKNOWLEDGED | The alert has been acknowledged but not resolved. | -| SUPPRESSED | Alert is suppressed due to various reasons. | -| PENDING | No Data or insufficient data to determine the alert state. | +| Status | Description | Expected Value | +|--------------|-----------------------------------------------------------------------------|----------------| +| FIRING | Active alert indicating an ongoing issue. | "firing" | +| RESOLVED | The issue has been resolved, and the alert is no longer active. | "resolved" | +| ACKNOWLEDGED | The alert has been acknowledged but not resolved. | "acknowledged" | +| SUPPRESSED | Alert is suppressed due to various reasons. | "suppressed" | +| PENDING | No Data or insufficient data to determine the alert state. | "pending" | ## Provider Alert Mappings diff --git a/docs/overview/cel.mdx b/docs/overview/cel.mdx new file mode 100644 index 0000000000..98f1ecafca --- /dev/null +++ b/docs/overview/cel.mdx @@ -0,0 +1,42 @@ +--- +title: "Common Expression Language (CEL)" +--- + + +It worth reading [CEL official docs](https://cel.dev) to learn about the language and its syntax. + + +Keep utilizes **CEL (Common Expression Language)** as a powerful and flexible tool to evaluate and filter alerts against predefined rules. CEL enables users to write precise expressions that define conditions under which alerts are processed, displayed, or acted upon. This capability enhances alert management by allowing granular control over visibility and response to incoming alerts. + +## How Keep Uses CEL + +### Alert Filtering +Alerts are dynamically evaluated against CEL expressions to determine which alerts meet the specified criteria. This real-time filtering ensures only the most relevant alerts are surfaced. + +### Rule Evaluation +CEL expressions can be embedded in rules to enforce specific actions, such as escalating an alert or triggering a workflow. 
+ +### Presets +Users can save frequently used CEL expressions as presets for quick and consistent application across different alert views or teams. + + +## Examples + +### Filter Alerts from a Specific Service + +```cel +service.contains("database") +``` + +### Combine Multiple Conditions + +```cel +severity == "critical" && source == "prometheus" +``` + + +### Exclude Specific Alerts + +```cel +!(service == "auth" && severity == "low") +``` diff --git a/docs/overview/comparison.mdx b/docs/overview/comparison.mdx deleted file mode 100644 index 2329579f33..0000000000 --- a/docs/overview/comparison.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: "Comparison" ---- - -It's often easier to grasp a tool's features by comparing it to others in the same ecosystem. Here, we'll explain how Keep interacts with and compares to these tools. - -## Keep vs IRM (PagerDuty, OpsGenie, etc.) - -Incident management tools aim to notify the right person at the right time, simplify reporting, and set up efficient war rooms. - -"Keep" focuses on the alert lifecycle, noise reduction, and AI-driven alert-incident correlation. Essentially, Keep acts as an 'intelligent layer before the IRM,' managing millions of alerts before they reach your IRM tool. Keep offers high-quality integrations with PagerDuty, OpsGenie, Grafana OnCall, and more. - -## Keep vs AIOps in Observability (Elastic, Splunk, etc.) - -Keep is different because it’s able to correlate alerts between different observability platforms. - -| | Keep | Alternative | -| ------------------------------------- | -------------------------------------------------------------- | ---------------------------- | -| Aggregates alerts from one platform | ✅ | ✅ | -| Aggregates alerts from mutliple platforms | ✅ | ❌ | -| Correlates alerts between multiple sources | ✅ | ❌ | -| Alerts enrichment | ✅ | ❌ | -| Open source | ✅ | ❌ | -| Workflow automation | ✅ | ❌ | - -## Keep vs AIOps platforms (BigPanda, Moogsoft, etc.) 
- -Keep is an alternative to platforms like BigPanda and Moogsoft. -Customers who have used both traditional platforms and Keep notice a significant improvement in alert correlation. Unlike the manual methods of other platforms, Keep uses advanced state-of-the-art AI models for easier and more effective alert correlation. - -| | Keep | Alternative | -| ------------------------------------- | -------------------------------------------------------------- | ---------------------------- | -| Aggregation of alerts | ✅ | ✅ | -| Integrations | ✅ (Bi-directional) | ✅ (Webhooks) | -| Alerts enrichment | ✅ | ✅ | -| Open source | ✅ | ❌ | -| Workflow automation | ✅ (GitHub Actions-like, infrastructure as code) | ✅ | -| Managed version | ✅ | ✅ | -| On-Premises | ✅ | ❌ | -| Noise reduction & correlation | ✅ (AI) | ✅ (Rule-based in some cases) | diff --git a/docs/overview/comparisons.mdx b/docs/overview/comparisons.mdx new file mode 100644 index 0000000000..e26f70623a --- /dev/null +++ b/docs/overview/comparisons.mdx @@ -0,0 +1,40 @@ +--- +title: "Comparison" +--- + +It's often easier to grasp a tool's features by comparing it to others in the same ecosystem. Here, we'll explain how Keep interacts with and compares to these tools. + +## Keep vs IRM (PagerDuty, OpsGenie, etc.) + +Incident management tools aim to notify the right person at the right time, simplify reporting, and set up efficient war rooms. + +"Keep" focuses on the alert lifecycle, noise reduction, and AI-driven alert-incident correlation. Essentially, Keep acts as an 'intelligent layer before the IRM,' managing millions of alerts before they reach your IRM tool. Keep offers high-quality integrations with PagerDuty, OpsGenie, Grafana OnCall, and more. + +## Keep vs AIOps in Observability (Elastic, Splunk, etc.) + +Keep is different because it’s able to correlate alerts between different observability platforms. 
+ +| | Keep | Alternative | +| ------------------------------------- | -------------------------------------------------------------- | ---------------------------- | +| Aggregates alerts from one platform | ✅ | ✅ | +| Aggregates alerts from multiple platforms | ✅ | ❌ | +| Correlates alerts between multiple sources | ✅ | ❌ | +| Alerts enrichment | ✅ | ❌ | +| Open source | ✅ | ❌ | +| Workflow automation | ✅ | ❌ | + +## Keep vs AIOps platforms (BigPanda, Moogsoft, etc.) + +Keep is an alternative to platforms like BigPanda and Moogsoft. +Customers who have used both traditional platforms and Keep notice a significant improvement in alert correlation. Unlike the manual methods of other platforms, Keep uses advanced state-of-the-art AI models for easier and more effective alert correlation. + +| | Keep | Alternative | +| ------------------------------------- | -------------------------------------------------------------- | ---------------------------- | +| Aggregation of alerts | ✅ | ✅ | +| Integrations | ✅ (Bi-directional) | ✅ (Webhooks) | +| Alerts enrichment | ✅ | ✅ | +| Open source | ✅ | ❌ | +| Workflow automation | ✅ (GitHub Actions-like, infrastructure as code) | ✅ | +| Managed version | ✅ | ✅ | +| On-Premises | ✅ | ❌ | +| Noise reduction & correlation | ✅ (AI) | ✅ (Rule-based in some cases) | diff --git a/docs/overview/correlation-rules.mdx b/docs/overview/correlation-rules.mdx new file mode 100644 index 0000000000..63583fbe8b --- /dev/null +++ b/docs/overview/correlation-rules.mdx @@ -0,0 +1,85 @@ +--- +title: "Manual Correlation Rules" +--- + +The Keep Correlation Engine is a versatile tool for correlating and consolidating alerts into incidents or incident-candidates. +This guide explains the core concepts, usage, and best practices for effectively utilizing the rule engine. + + + + + + + +## Core Concepts +- **Rule definition**: A rule in Keep is a set of conditions that, when met, creates an incident or incident-candidate. 
+- **Alert attributes**: These are characteristics or data points of an alert, such as source, severity, or any attribute an alert might have. +- **Conditions and logic**: Rules are built by defining conditions based on alert attributes, using logical operators (like AND/OR) to combine multiple conditions. + +## Creating Correlation Rules +Creating a rule involves defining the conditions under which an alert should be categorized or actions should be grouped. + +1. **Accessing the Correlation Engine**: Navigate to the Correlation section in the Keep platform. +2. **Defining rule criteria**: + - **Name the rule**: Assign a descriptive name that reflects its purpose. + - **Set conditions**: Use alert attributes to create conditions. For example, a rule might specify that an alert with a severity of 'critical' and a source of 'Prometheus' should be categorized as 'High Priority'. + - **Logical grouping**: Combine conditions using logical operators to form comprehensive rules. + - **Manual approve**: Create Incident-candidate or full-fledged incident. + +## Dynamic Incident Naming + +The correlation engine supports dynamic incident naming based on alert attributes. This allows you to create more meaningful and context-aware incident names that reflect the actual alert data. + +### Template Variables + +You can use template variables in your incident name using the `{{ alert.attribute }}` syntax. These variables are replaced with actual values from the alerts. 
For example: +- `{{alert.labels.host}}` - References the host from alert labels +- `{{alert.service}}` - References the service name from the alert + +### Behavior with Multiple Alerts + +When an incident contains multiple alerts: + +- Values from all alerts are automatically concatenated with commas +- Duplicate values are automatically deduplicated +- If a new alert adds a unique value, the incident name is updated to include it + +#### Dynamic Name Example + +**Template:** "Service Issue on `{{alert.labels.host}}`" + +**First alert** +``` +{ + ... + { + "labels": { + "host": "host1" + } + } + ... +} +``` + +**Second alert** + +``` +{ + ... + { + "labels": { + "host": "host2" + } + } + ... +} +``` + +**Incident Name** + +Service Issue on host1,host2 + +## Examples +- **Metric-based alerts**: Construct a rule to pinpoint alerts associated with specific metrics, such as high CPU usage on servers. This can be achieved by grouping alerts that share a common attribute, like a 'CPU usage' tag, ensuring you quickly identify and address performance issues. +- **Feature-related alerts**: Establish rules to create incidents for specific features or services. For instance, you can start an incident based on a 'service' or 'URL' tag. This approach is particularly useful for tracking and managing alerts related to distinct functionalities or components within your application. +- **Team-based alert management**: Implement rules to create incidents according to team responsibilities. This might involve grouping based on the systems or services a particular team oversees. Such a strategy ensures that alerts are promptly directed to the appropriate team, enhancing response times and efficiency. 
diff --git a/docs/overview/correlation-topology.mdx b/docs/overview/correlation-topology.mdx new file mode 100644 index 0000000000..618963a7c4 --- /dev/null +++ b/docs/overview/correlation-topology.mdx @@ -0,0 +1,104 @@ +--- +title: "Topology Correlation" +--- + +The Topology Processor is a core component of Keep that helps correlate alerts based on your infrastructure's topology, creating meaningful incidents that reflect the relationships between your services and applications. +It automatically analyzes incoming alerts and their relationship to your infrastructure topology, creating incidents when multiple related services or components of an application are affected. + +Read more about [Service Topology](/overview/servicetopology). + + + + + + + The Topology Processor is disabled by default. To enable it, set the + environment variable `KEEP_TOPOLOGY_PROCESSOR=true`. + + +## How It Works + +1. **Service Discovery**: The processor maintains a map of your infrastructure's topology, including: + + - Services and their relationships + - Applications and their constituent services + - Dependencies between different components + +2. **Alert Processing**: Every few seconds, the processor: + + - Analyzes recent alerts + - Maps alerts to services in your topology + - Creates or updates incidents based on application-level impact + +3. 
**Incident Creation**: When multiple services within an application have active alerts: + - Creates a new application-level incident + - Groups related alerts under this incident + - Provides context about the affected application and its services + +## Configuration + +### Environment Variables + +| Variable | Description | Default | +| ------------------------------------------ | --------------------------------------------------- | ------- | +| `KEEP_TOPOLOGY_PROCESSOR` | Enable/disable the topology processor | `false` | +| `KEEP_TOPOLOGY_PROCESSOR_INTERVAL` | Interval for processing alerts (in seconds) | `10` | +| `KEEP_TOPOLOGY_PROCESSOR_LOOK_BACK_WINDOW` | Look back window for alert correlation (in minutes) | `15` | + +## Incident Management + +### Creation + +When the processor detects alerts affecting multiple services within an application: + +- Creates a new incident with type "topology" +- Names it "Application incident: {application_name}" +- Automatically confirms the incident +- Links all related alerts to the incident + +### Resolution + +Incidents can be configured to resolve automatically when: + +- All related alerts are resolved +- Specific resolution criteria are met + +## Best Practices + +1. **Service Mapping** + + - Ensure services in alerts match your topology definitions + - Maintain up-to-date topology information + +2. **Application Definition** + + - Group related services into logical applications + - Define clear service boundaries + +3. **Alert Configuration** + - Include service information in your alerts + - Use consistent service naming across monitoring tools + +## Example + +If you have an application "payment-service" consisting of multiple microservices: + +```json +{ + "application": "payment-service", + "services": ["payment-api", "payment-processor", "payment-database"] +} +``` + +When alerts come in for both `payment-api` and `payment-database`, the Topology Processor will: + +1. 
Recognize these services belong to the same application +2. Create a single incident for "payment-service" +3. Group both alerts under this incident +4. Provide application-level context in the incident description + +## Limitations + +- Currently supports only application-based incident creation +- One active incident per application at a time +- Requires service information in alerts for correlation diff --git a/docs/overview/deduplication.mdx b/docs/overview/deduplication.mdx new file mode 100644 index 0000000000..718be24ea6 --- /dev/null +++ b/docs/overview/deduplication.mdx @@ -0,0 +1,110 @@ +--- +title: "Deduplication" +--- + +Alert deduplication is a crucial feature in Keep that helps reduce noise and streamline incident management by grouping similar alerts together. This process ensures that your team isn't overwhelmed by a flood of notifications for what is essentially the same issue, allowing for more efficient and focused incident response. + + + + + + +## Glossary + +- **Deduplication Rule**: A set of criteria used to determine if alerts should be grouped together. +- **Partial Deduplication**: Correlates instances of alerts into single alerts, considering the case of the same alert with different statuses (e.g., firing and resolved). This is the default mode where specified fields are used to identify and group related alerts. +- **Fingerprint Fields**: Specific alert attributes used to identify similar alerts. +- **Full Deduplication**: A mode where alerts are considered identical if all fields match exactly (except those explicitly ignored). This helps avoid system overload by discarding duplicate alerts. +- **Ignore Fields**: In full deduplication mode, these are fields that are not considered when comparing alerts. + +## Deduplication Types + +### Partial Deduplication +Partial deduplication allows you to specify certain fields (fingerprint fields) that are used to identify similar alerts. 
Alerts with matching values in these specified fields are considered duplicates and are grouped together. This method is flexible and allows for fine-tuned control over how alerts are deduplicated. + +Every provider integrated with Keep comes with pre-built partial deduplication rule tailored to that provider's specific alert format and common use cases. +The default fingerprint fields defined using `FINGERPRINT_FIELDS` attributes in the provider code (e.g. [datadog provider](https://github.com/keephq/keep/blob/main/keep/providers/datadog_provider/datadog_provider.py#L188) or [gcp monitoring provider](https://github.com/keephq/keep/blob/main/keep/providers/gcpmonitoring_provider/gcpmonitoring_provider.py#L52)). + +### Full Deduplication +When full deduplication is enabled, Keep will also discard exact same events (excluding ignore fields). This mode considers all fields of an alert when determining duplicates, except for explicitly ignored fields. + +By default, exact similar events excluding lastReceived time are fully deduplicated and discarded. This helps prevent system overload from repeated identical alerts. + +## Real Examples of Alerts and Results + +### Example 1: Partial Deduplication + +**Rule** - Deduplicate based on 'service' and 'error_message' fields. + +```json +# alert 1 +{ + "service": "payment", + "error_message": "Database connection failed", + "severity": "high", + "lastReceived": "2023-05-01T10:00:00Z" +} +# alert 2 +{ + "service": "payment", + "error_message": "Database connection failed", + "severity": "critical", + "lastReceived": "2023-05-01T10:05:00Z" +} +# alert 3 +{ + "service": "auth", + "error_message": "Invalid token", + "severity": "medium", + "lastReceived": "2023-05-01T10:10:00Z" +} +``` + +**Result**: +- Alerts 1 and 2 are deduplicated into a single alert, fields are updated. +- Alert 3 remains separate as it has a different service and error message. 
+ +### Example 2: Full Deduplication + +**Rule**: Full deduplication with 'timestamp' as an ignore field + +**Incoming Alerts**: + +```json + +# alert 1 +{ + service: "api", + error: "Rate limit exceeded", + user_id: "12345", + lastReceived: "2023-05-02T14:00:00Z" +} +# alert 2 (discarded as it's identical) +{ + service: "api", + error: "Rate limit exceeded", + user_id: "12345", + lastReceived: "2023-05-02T14:01:00Z" +} +# alert 3 +{ + service: "api", + error: "Rate limit exceeded", + user_id: "67890", + lastReceived: "2023-05-02T14:02:00Z" +} +``` + +**Result**: +- Alerts 1 and 2 are deduplicated as they are identical except for the ignored timestamp field. +- Alert 3 remains separate due to the different user_id. + +## How It Works + +Keep's deduplication process follows these steps: + +1. **Alert Ingestion**: Every alert received by Keep is first ingested into the system. + +2. **Enrichment**: After ingestion, each alert undergoes an enrichment process. This step adds additional context or information to the alert, enhancing its value and usefulness. + +3. **Deduplication**: Following enrichment, Keep's alert deduplicator comes into play. It applies the defined deduplication rules to the enriched alerts. diff --git a/docs/overview/enrichment/extraction.mdx b/docs/overview/enrichment/extraction.mdx index 74e2840ac4..53823e7e10 100644 --- a/docs/overview/enrichment/extraction.mdx +++ b/docs/overview/enrichment/extraction.mdx @@ -2,10 +2,13 @@ title: "Extraction" --- -# Alert Enrichment: Extraction - Keep's Alert Extraction enrichment feature enables dynamic extraction of data from incoming alerts using regular expressions. This powerful tool allows users to define extraction rules that identify and extract data based on patterns, enriching alerts with additional structured data derived directly from alert content. + + + + + ## Introduction Handling a variety of alert formats and extracting relevant information can be challenging. 
Keep's Alert Extraction feature simplifies this process by allowing users to define regex-based rules that automatically extract key pieces of information from alerts. This capability is crucial for standardizing alert data and enhancing alert context, which facilitates more effective monitoring and response strategies. diff --git a/docs/overview/enrichment/mapping.mdx b/docs/overview/enrichment/mapping.mdx index 6395557021..cd446076dd 100644 --- a/docs/overview/enrichment/mapping.mdx +++ b/docs/overview/enrichment/mapping.mdx @@ -2,30 +2,53 @@ title: "Mapping" --- -# Alert Enrichment: Mapping +Keep's Alert Mapping enrichment feature provides a powerful mechanism for dynamically enhancing alert data by leveraging external data sources, such as CSV files and topology data. This feature allows for the matching of incoming alerts to specific records in a CSV file or topology data based on predefined attributes (matchers) and enriching those alerts with additional information from the matched records. + + + + -Keep's Alert Mapping enrichment feature provides a powerful mechanism for dynamically enhancing alert data by leveraging external data sources, such as CSV files. This feature allows for the matching of incoming alerts to specific records in a CSV file based on predefined attributes (matchers) and enriching those alerts with additional information from the matched records. ## Introduction -In complex monitoring environments, the need to enrich alert data with additional context is critical for effective alert analysis and response. Keep's Alert Mapping and Enrichment enables users to define rules that match alerts to rows in a CSV file, appending or modifying alert attributes with the values from matching rows. This process adds significant value to each alert, providing deeper insights and enabling more precise and informed decision-making. 
+In complex monitoring environments, the need to enrich alert data with additional context is critical for effective alert analysis and response. Keep's Alert Mapping and Enrichment enables users to define rules that match alerts to rows in a CSV file or topology data, appending or modifying alert attributes with the values from matching rows. This process adds significant value to each alert, providing deeper insights and enabling more precise and informed decision-making. ## How It Works +## Mapping with CSV Files + 1. **Rule Definition**: Users define mapping rules that specify which alert attributes (matchers) should be used for matching alerts to rows in a CSV file. 2. **CSV File Specification**: A CSV file is associated with each mapping rule. This file contains additional data that should be added to alerts matching the rule. 3. **Alert Matching**: When an alert is received, the system checks if it matches the conditions of any mapping rule based on the specified matchers. 4. **Data Enrichment**: If a match is found, the alert is enriched with additional data from the corresponding row in the CSV file. +The CSV file will look like: + +| region |responsible_team | severity_override | +|--------------|-----------------|---------------------------------| +| us-east-1 | team-alpha | high | +| us-west-2 | team-beta | medium | +| eu-central-1 | team-gamma | low | + +## Mapping with Topology Data + +1. **Rule Definition**: Users define mapping rules that specify which alert attributes (matchers) should be used for matching alerts to topology data. +2. **Topology Data Specification**: Topology data is associated with each mapping rule. This data contains additional information about the components and their relationships in your environment. +3. **Alert Matching**: When an alert is received, the system checks if it matches the conditions of any mapping rule based on the specified matchers. +4. 
**Data Enrichment**: If a match is found, the alert is enriched with additional data from the corresponding topology data. + ## Practical Example Imagine you have a CSV file with columns representing different aspects of your infrastructure, such as `region`, `responsible_team`, and `severity_override`. By creating a mapping rule that matches alerts based on `service` and `region`, you can automatically enrich alerts with the responsible team and adjust severity based on the matched row in the CSV file. +Similarly, you can use topology data to enrich alerts. For example, if an alert is related to a specific service, you can use topology data to find related components and their statuses, providing a more comprehensive view of the issue. + ## Core Concepts -- **Matchers**: Attributes within the alert used to identify matching rows within the CSV file. Common matchers include identifiers like `service` or `region`. +- **Matchers**: Attributes within the alert used to identify matching rows within the CSV file or topology data. Common matchers include identifiers like `service` or `region`. - **CSV File**: A structured file containing rows of data. Each column represents a potential attribute that can be added to an alert. -- **Enrichment**: The process of adding new attributes or modifying existing ones in an alert based on the data from a matching CSV row. +- **Topology Data**: Information about the components and their relationships in your environment. This data can be used to enrich alerts with additional context. +- **Enrichment**: The process of adding new attributes or modifying existing ones in an alert based on the data from a matching CSV row or topology data. ## Creating a Mapping Rule @@ -35,13 +58,13 @@ To create an alert mapping and enrichment rule: -1. **Define the Matchers**: Specify which alert attributes will be used to match rows in the CSV file. -2. **Upload the CSV File**: Provide the CSV file containing the data for enrichment. +1. 
**Define the Matchers**: Specify which alert attributes will be used to match rows in the CSV file or topology data. +2. **Specify the Data Source**: Provide the CSV file or specify the topology data to be used for enrichment. 3. **Configure the Rule**: Set additional parameters, such as whether the rule should override existing alert attributes. ## Best Practices -- **Keep CSV Files Updated**: Regularly update the CSV files to reflect the current state of your infrastructure and operational data. +- **Keep CSV Files and Topology Data Updated**: Regularly update the CSV files and topology data to reflect the current state of your infrastructure and operational data. - **Use Specific Matchers**: Define matchers that are unique and relevant to ensure accurate matching. - **Monitor Rule Performance**: Review the application of mapping rules to ensure they are working as expected and adjust them as necessary. diff --git a/docs/overview/examples.mdx b/docs/overview/examples.mdx deleted file mode 100644 index e5e5641417..0000000000 --- a/docs/overview/examples.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: "Examples" ---- - -Got an interesting example of how would you use Keep? Feel free to submit a
new example issue and we'll credit you when we add it! - - -## Create an incident only if the customer is on Enterprise tier -In this example we will utilze: - -1. Datadog for monitoring -2. OpsGenie for incident managment -3. A postgres database that stores the customer tier. - -This example consists of two steps: -1. Connect your tools - Datadog, OpsGenie and Postgres. -2. Create a workflow that is triggered by the alert, runs an SQL query, and decides whether to create an incident. Once the workflow is created, you can upload it via the [Workflows](/platform/workflows) page. -```yaml -alert: - id: enterprise-tier-alerts - description: Create an incident only if the customer is enterprise. - triggers: - - type: alert - filters: - - key: source - value: datadog - - key: name - value: YourAlertName - steps: - - name: check-if-customer-is-enterprise - provider: - type: postgres - config: "{{ providers.postgres-prod }}" - with: - # Keep will replace {{ alert.customer_id }} with the customer id - query: "SELECT customer_tier, customer_name FROM customers_table WHERE customer_id = {{ alert.customer_id }} LIMIT 1" - actions: - - name: opsgenie-incident - # trigger only if the customer is enterprise - condition: - - name: verify-true - type: assert - assert: "{{ steps.check-if-customer-is-enterprise.results[0] }} == 'enterprise'" - provider: - type: opsgenie - config: " {{ providers.opsgenie-prod }} " - with: - message: "A new alert on enteprise customer ( {{ steps.check-if-customer-is-enterprise.results[1] }} )" -``` - -## Send a slack message for every Cloudwatch alarm -1. Connect your Cloudwatch(/es) and Slack to Keep. -2. 
Create a simple Workflow that filters for CloudWatch events and sends a Slack message: -```yaml -workflow: - id: cloudwatch-slack - description: Send a slack message when a cloudwatch alarm is triggered - triggers: - - type: alert - filters: - - key: source - value: cloudwatch - actions: - - name: trigger-slack - provider: - type: slack - config: " {{ providers.slack-prod }} " - with: - message: "Got alarm from aws cloudwatch! {{ alert.name }}" - -``` - - -## Monitor a HTTP service -Suppose you want to monitor an HTTP service. -All you have to do is upload the following workflow: - -```yaml -workflow: - id: monitor-http-service - description: Monitor a HTTP service each 10 seconds - triggers: - - type: interval - value: 10 - steps: - - name: simple-http-request - provider: - type: http - with: - method: GET - url: 'https://YOUR_SERVICE_URL/' - timeout: 2 - verify: true - actions: - - name: trigger-slack - condition: - - name: assert-condition - type: assert - assert: '{{ steps.simple-http-request.results.status_code }} == 200' - provider: - type: slack - config: ' {{ providers.slack-prod }} ' - with: - message: "HTTP Request Status: {{ steps.simple-http-request.results.status_code }}\nHTTP Request Body: {{ steps.simple-http-request.results.body }}" - on-failure: - # Just need a provider we can use to send the failure reason - provider: - type: slack - config: ' {{ providers.slack-prod }} ' - -``` diff --git a/docs/overview/faq.mdx b/docs/overview/faq.mdx new file mode 100644 index 0000000000..7bef5230ae --- /dev/null +++ b/docs/overview/faq.mdx @@ -0,0 +1,26 @@ +--- +title: "FAQ" +sidebarTitle: FAQ +--- + +## FAQ + +### 1. "Failed to copy alert/fingerprint. Please check your browser permissions" + +Modern browsers block clipboard access from insecure ("http") origins for security reasons. 
+ +To confirm the root cause of the issue, check your website settings in the browser: + + +If you see the "Blocked to protect your privacy" message or similar text under clipboard settings, this confirms the error is due to an insecure origin: + + +To resolve this: + +- For production: Configure HTTPS for your Keep deployment +- For local development: Use "localhost" which browsers treat as a secure origin +- If using a custom domain locally: Enable HTTPS or switch to "localhost" + +If you're accessing Keep from a secure origin and still experiencing this issue, please [reach out](https://slack.keephq.dev) to us. diff --git a/docs/providers/fingerprints.mdx b/docs/overview/fingerprints.mdx similarity index 100% rename from docs/providers/fingerprints.mdx rename to docs/overview/fingerprints.mdx diff --git a/docs/overview/glossary.mdx b/docs/overview/glossary.mdx new file mode 100644 index 0000000000..6dde914d41 --- /dev/null +++ b/docs/overview/glossary.mdx @@ -0,0 +1,50 @@ +--- +title: "Glossary" +--- +## Alert +An alert is an event that is triggered when something bad happens or is going to happen. +The term "alert" can sometimes be interchanged with "alarm" (e.g. in CloudWatch) or "monitor" (Datadog). + +## Incident +An incident is a group of alerts that are related to each other. + +## Provider +A provider can be a module that pulls alerts into Keep or pushes data out of Keep by interacting with external systems. + +### Provider as a data source +Within the context of a Workflow, a Provider can: +- Query data - query Datadog's API or run a SQL query against a database. +- Push data - send a Slack message or create a PagerDuty incident. + +### Provider as an alert source +When you connect a Provider, Keep begins to read and process alerts from that Provider. For example, after connecting your Prometheus instance, you'll start seeing your Prometheus alerts in Keep. +A Provider can either push alerts into Keep, or Keep can pull alerts from the Provider. 
+ +#### Push alerts to Keep (Manual) +You can configure your alert source to push alerts into Keep. + +For example, consider Prometheus. If you want to push alerts from Prometheus to Keep, you'll need to configure Prometheus Alertmanager to send the alerts to +'https://api.keephq.dev/alerts/event/prometheus' using API key authentication. Each Provider implements Push mechanism and is documented under the specific Provider page. + +#### Push alerts to Keep (Automatic) +In compatible tools, Keep can automatically integrate with the alerting policy of the source tool and add itself as an alert destination. You can learn more about Webhook Integration [here](/providers/overview). +Please note that this will slightly modify your monitors/notification policy. + +### Pull alerts by Keep +Keep also integrates with the alert APIs of various tools and can automatically pull alerts. While pulling is easier to set up (requiring only credentials), pushing is preferable when automation is involved. + +## Workflow +Workflows consist of a list of [Steps](/workflows/overview#steps) and [Actions](/workflows/overview#actions). +A workflow can be triggered in the following ways: +- When an Alert is triggered. +- In a predefined interval. +- Manually. + +Workflows are commonly used to: +1. Enrich your alerts with more context. +2. Automate the response to alert. +3. Create multi-step alerts. + +## API first +Keep is an API-first platform, meaning that anything you can do via the UI can also be accomplished through the [API](https://api.keephq.dev/redoc) +This gives you the flexibility to integrate Keep with your existing stack and to automate alert remediation and enrichment processes. 
diff --git a/docs/overview/howdoeskeepgetmyalerts.mdx b/docs/overview/howdoeskeepgetmyalerts.mdx new file mode 100644 index 0000000000..ac2c0c93ae --- /dev/null +++ b/docs/overview/howdoeskeepgetmyalerts.mdx @@ -0,0 +1,34 @@ +--- +title: "Push vs Pull alerts" +--- + +There are primarily two ways to get alerts into Keep: + + + + We strongly recommend using the push method for alerting, as pulling does not + include a lot of the features, like workflow automation. It is mainly used for + a quick way to get alerts into Keep and start exploring the value. + + +### Push + +When you connect a [Provider](/providers), Keep automatically instruments the tools to send alerts to Keep via webhook. +As an example, when you connect Grafana, Keep will automatically create a new Webhook contact point in Grafana, and a new Notification Policy to send all alerts to Keep. + +You can configure which providers you want to push from by checking the `Install Webhook` checkbox in the provider settings. + + + + + +### Pull + +When you connect a [Provider](/providers), Keep will start pulling alerts from the tool automatically. +Pulling interval is defined by the `KEEP_PULL_INTERVAL` environment variable and defaults to 7 days (in minutes) and can be completely turned off by using the `KEEP_PULL_DATA_ENABLED` environment variable. + +You can also configure which providers you want to pull from by checking the `Pulling Enabled` checkbox in the provider settings. + + + + diff --git a/docs/overview/introduction.mdx b/docs/overview/introduction.mdx index 958a27caaf..c6f165bba5 100644 --- a/docs/overview/introduction.mdx +++ b/docs/overview/introduction.mdx @@ -1,26 +1,46 @@ --- title: "Introduction" -description: "Keep is an open-source alert management and automation tool that provides everything you need to create and manage alerts effectively." +description: "Keep is an open-source alert management and AIOps platform that is a swiss-knife for alerting, automation, and noise reduction." 
--- -Start using Keep by logging in to the [platform](https://platform.keephq.dev). -## What's alert? + + Keep has a new playground! Visit the [Playground](https://playground.keephq.dev) to explore its powerful features, experiment with configurations, and test AIOps techniques in a sandbox environment. -An alert is an event that is triggered when something undesirable occurs or is about to occur. -It is usually triggered by monitoring tools such as Prometheus, Grafana, or CloudWatch, and in some cases, proprietary tools. -Alerts usually categorized into three different groups: -- Infrastructure-related alerts - e.g., a virtual machine consumes more than 99% CPU. -- Application-related alerts - e.g., an endpoint starts returning 5XX status codes. -- Business-related alerts - e.g., a drop in the number of sign-ins or purchases. + Once you're ready to start using Keep in your environment, head over to the [Platform](https://platform.keephq.dev) to set up your tenant and get started. Don't forget to join our [Slack community](https://slack.keephq.dev) for help and to share your feedback. + -## What problem does Keep solve? -Keep helps with every step of the alert lifecycle: -1. Maintenance - Keep integrates with your tools, allowing you to manage all of your alerts within a single interface. -2. Noise reduction - By integrating with monitoring tools, Keep can deduplicate and correlate alerts to reduce noise in your organization. Rule-based (all distributions), and AI-based (Keep Enterprise only). -3. Automation - [Keep Workflows](/workflows) enable automated alert enrichment and response. -4. Incident Correlation - Automatically assign alerts to incidents, automatic triaging and root cause analysis (Keep Enterprise only). -5. Summarization - Summary of an incident based on past incidents and the knowledge base (Keep Enterprise only). +## What's AIOps? -## How does Keep integrate with the alerts? 
-Alerts can either be [pulled](/platform/alerts#pulled-alerts) by Keep or [pushed](/platform/alerts#pushed-alerts) into it. Keep also offers zero-click alert instrumentation through [webhook installation](/platform/providers#webhook-integration). \ No newline at end of file +In simple words, AI for IT Operations (aka AIOps) is about automating repetitive tasks, reducing noise from monitoring tools, and helping teams overcome alert fatigue by turning overwhelming data into actionable insights. + +With AIOps, teams can eliminate noise, prioritize critical issues, and focus on solving real problems rather than constantly firefighting alerts. + +## Why do we build Keep? + +Working with current tools such as BigPanda, Splunk ITSI, or ServiceNow ITOM, we identified a gap: + +- **No Open Source Solution:** We have Grafana for visualization and Prometheus for metrics, but nothing for AIOps. Keep fills this gap as the first open-source solution for AIOps. +- **Not DevOps/SRE Friendly:** Current tools are enterprise-focused but not in a good way. If you're an SRE team lead or head of IT operations in a company with ~100 employees, the existing tools won't work for you. They're too expensive, and their UX requires a dedicated team just for setup and maintenance. Keep is enterprise-ready (scaling, SSO, etc.) but also designed for small teams that want to adopt AIOps practices. +- **A "Post LLM Era" AIOps:** Existing tools were built in a different technical era. Keep is designed to leverage the advancements of the large language model (LLM) era, integrating AI more seamlessly into IT operations. + +## Our Philosophy + +- **Easy to start** – Whether locally or on Kubernetes, we provide one-click solutions like `helm install` and `docker-compose` so you can quickly spin up Keep and start exploring its capabilities. +- **Easy to extend** – Keep is designed with extensibility in mind, making it straightforward to add new integrations or functionality to meet your specific needs. 
+- **Easy to deploy** – Every aspect of Keep can be provisioned as code, enabling seamless automation of deployments and integration into your CI/CD pipelines. +- **Easy to collaborate** – As an open-source project, we truly believe in the power of community and collaboration. We actively listen to user feedback and strive to continuously improve Keep based on the needs and insights of our users. + +## Our Vision + +Keep is built so every team can benefit from AIOps. + +Whether you're a small team looking for a Kubernetes-local single pane of glass for your Prometheus alerts, or an enterprise with dozens of tools generating alerts and needing to sync with your ServiceNow tickets, Keep is for you. + +Our vision is to democratize AIOps, making it accessible and practical for teams of all sizes. + +## What you should read next + +- [Key Concepts](/overview/glossary): Understand the foundational ideas behind Keep. +- [Use Cases](/overview/usecases): Learn how Keep can solve specific IT operations challenges. +- [Playground](/overview/playground): Explore Keep's playground. diff --git a/docs/overview/keyconcepts.mdx b/docs/overview/keyconcepts.mdx deleted file mode 100644 index 07a3b2dd31..0000000000 --- a/docs/overview/keyconcepts.mdx +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: "Key concepts" ---- -## Alert -Alert is an event that triggered when something bad happens or going to happen. -The term "alert" can sometimes be interchanged with "alarm" (e.g. in CloudWatch) or "monitor" (e.g. in Datadog). - -You can easily initiate a [Workflow](#workflow) when an alert is triggered. - -## Provider -A Provider serves as the building block for input/output in Keep. - -### Provider as a data source -Within the context of a Workflow, a Provider can: -- Query data - query Datadog's API or runs a SQL query against a database. -- Push data - send a Slack message or create a PagerDuty incident. 
- -### Provider as an alert source -When you connect a Provider, Keep begins to read and process alerts from that Provider. For example, after connecting your Prometheus instance, you'll start seeing your Prometheus alerts in Keep. -A Provider can either push alerts into Keep, or Keep can pull alerts from the Provider. - -#### Push alerts to Keep (Manual) -Configure your alert source to push alerts to Keep. - -For example, consider Promethues. If you want to push alerts from Promethues to Keep, you'll need to configure Promethues Alertmanager to send the alerts to -'https://api.keephq.dev/alerts/event/prometheus' using API key authentication. Each Provider implements Push mechanism and documented under the specific Provider page. - -#### Push alerts to Keep (Automatic) -In compatible tools, Keep can automatically integrate with the alerting policy of the source and add itself as an alert destination. You can learn more about Webhook Integration [here](/platform/providers#webhook-integration). -Please note that this will slightly modify your monitors/notification policy to include Keep. - -### Pull alerts by Keep -Keep also integrates with the alert APIs of various tools and can automatically pull alerts. While pulling is easier to set up (requiring only credentials), pushing is preferable when automation is involved. - -## Workflow -Workflows consist of a list of [Steps](/workflows/overview#steps) and [Actions](/workflows/overview#actions). -A workflow can be triggered in the following ways: -- When Alert is triggered. -- In a predefined interval. -- Manually. - -Workflows are commonly used to: -1. Enrich your alerts with more context. -2. Automate the response to alert. -3. Create multi-steps alerts. 
- -## API first -Keep is an API-first platform, meaning that anything you can do via the [UI](ui/) can also be accomplished through the [API](api/) -This gives you the flexibility to integrate Keep with your existing stack and to automate alert remediation and enrichment processes. diff --git a/docs/overview/maintenance-windows.mdx b/docs/overview/maintenance-windows.mdx new file mode 100644 index 0000000000..85bd4bac81 --- /dev/null +++ b/docs/overview/maintenance-windows.mdx @@ -0,0 +1,101 @@ +--- +title: "Maintenance Windows" +--- + +Keep's Maintenance Windows feature provides a critical mechanism for managing alert noise during scheduled maintenance periods or other planned events. By defining Maintenance Window rules, users can suppress alerts that are irrelevant during these times, ensuring that only actionable alerts reach the operations team. + + + + + + + +## Introduction + +In dynamic IT environments, it's common to have periods where certain alerts are expected and should not trigger incident responses. Keep's Maintenance Windows feature allows users to define specific rules that temporarily suppress alerts based on various conditions, such as time windows or alert attributes. This helps prevent unnecessary alert fatigue and ensures that teams can focus on critical issues. + +## How It Works + +1. **Maintenance Window Rule Definition**: Users define Maintenance Window rules specifying the conditions under which alerts should be suppressed. +2. **Condition Specification**: A CEL (Common Expression Language) query is associated with each Maintenance Window rule to define the conditions for suppression. +3. **Time Window Configuration**: Maintenance Window rules can be set for specific start and end times, or based on a relative duration. +4. 
**Alert Suppression**: During the active period of a Maintenance Window rule, any alerts matching the defined conditions are either suppressed and **not shown in alerts feed** or shown in the feed in suppressed status (**this is configurable**). + +## Practical Example + +Suppose your team schedules a database upgrade that could trigger numerous non-critical alerts. You can create a Maintenance Window rule that suppresses alerts from the database service during the upgrade window. This ensures that your operations team isn't overwhelmed by non-actionable alerts, allowing them to focus on more critical issues. + +## Core Concepts + +- **Maintenance Window Rules**: Configurations that define when and which alerts should be suppressed based on time windows and conditions. +- **CEL Query**: A query language used to specify the conditions under which alerts should be suppressed. For example, a CEL query might suppress alerts where the source is a specific service during a maintenance window. +- **Time Window**: The specific start and end times or relative duration during which the Maintenance Window rule is active. +- **Alert Suppression**: The process of ignoring alerts that match the Maintenance Window rule's conditions during the specified time window. + +## Status-Based Filtering in Maintenance Windows + +In Keep, certain alert statuses are automatically ignored by Maintenance Window rules. Specifically, alerts with the statuses RESOLVED and ACKNOWLEDGED are not suppressed by Maintenance Window rules. This is intentional to ensure that resolving alerts can still be processed and appropriately close or update active incidents. + +### Why Are Some Statuses Ignored? + + • RESOLVED Alerts: These alerts indicate that an issue has been resolved. By allowing these alerts to bypass Maintenance Window rules, Keep ensures that any active incidents related to the alert can be properly closed, maintaining the integrity of the alert lifecycle. 
+
+ • ACKNOWLEDGED Alerts: These alerts have been acknowledged by an operator, signaling that they are being addressed. Ignoring these alerts in Maintenance Windows ensures that operators can track the progress of incidents and take necessary actions without interference.
+
+By excluding these statuses from Maintenance Window suppression, Keep allows for the continuous and accurate management of alerts, even during Maintenance Window periods, ensuring that resolution processes are not disrupted.
+
+## Creating a Maintenance Window Rule
+
+To create a Maintenance Window rule:
+
+
+
+
+1. **Define the Maintenance Window Name and Description**: Provide a name and optional description for the Maintenance Window rule to easily identify its purpose.
+2. **Specify the CEL Query**: Use CEL to define the conditions under which alerts should be suppressed (e.g., `source == "database"`).
+3. **Set the Time Window**: Choose a specific start and end time, or define a relative duration for the Maintenance Window.
+4. **Enable the Rule**: Decide whether the rule should be active immediately or scheduled for future use.
+
+## Best Practices
+
+- **Plan Maintenance Windows in Advance**: Schedule Maintenance Window periods in advance for known maintenance windows to prevent unnecessary alerts.
+- **Use Specific Conditions**: Define precise CEL queries to ensure only the intended alerts are suppressed.
+- **Review and Update Maintenance Windows**: Regularly review active Maintenance Window rules to ensure they are still relevant and adjust them as necessary.
+
+
+## Strategies
+
+To handle alerts during Maintenance Windows, Keep provides several Strategies that determine how these alerts are treated:
+
+### 1. Default
+
+The default behaviour of Maintenance Windows is to **Suppress** alerts that match the defined conditions.
+
+### 2. 
Recover status + +This strategy relies on the following premise: + + An alert received inside the Maintenance Window must be inhibited and once the Maintenance Window is over, the alert must recover its previous flow. + + +The following actions will therefore be taken with a new alert: +- When an alert is received, it will be checked against the Maintenance Window rules. +- If the alert matches any Maintenance Window rule, its status will be set to **Maintenance**. +- Workflows and Incidents handling are skipped. + +Every WATCHER_LAPSED_TIME seconds, the watcher will check whether there is any active Maintenance Window for every alert with a Maintenance status. +If so, the following actions will be taken: +- The alert will swap its status, and previous status. +- Workflows, Incidents handling, Pusher and Presets notifications will be launched in the same way as a new alert. + +#### 2.1 What is an expired Maintenance Window? + +For a maintenance window to be considered expired, the following conditions must be met: +- The **End Time** must be earlier than the current time. +- The **Enabled** flag must be set to **False**. + +#### 2.2 What are the specific conditions to use the Recover Status Strategy? +- Set **MAINTENANCE_WINDOW_STRATEGY** environment variable to **recover_previous_status**. +- "Alerts will show in suppressed status" option must be set to **True** in the Maintenance Window rule configuration. +- **Enabled** flag must be set to **True** in the Maintenance Window rule configuration. diff --git a/docs/overview/playground.mdx b/docs/overview/playground.mdx new file mode 100644 index 0000000000..9a26f99fff --- /dev/null +++ b/docs/overview/playground.mdx @@ -0,0 +1,39 @@ +--- +title: "Playground" +description: "Dive into Keep's [sandbox environment](https://playground.keephq.dev) to experience the full range of its AIOps capabilities." 
+--- + + + + + + +Use Keep's [playground](https://playground.keephq.dev) to explore, experiment, and understand how Keep streamlines operations and reduces noise, enabling you to gain clarity and control over your IT ecosystem. + +What to look at: +- [Alerts](#alerts) +- [Incidents](#incidents) +- [Providers](#providers) +- [Workflows](#workflows) +- [AIOps Techniques](#aiops-techniques) + + +## Alerts + +Get a single pane of glass view for all your alerts with customizable presets. Use CEL (Common Expression Language) syntax for precise filtering, configure the alerts table layout to match your workflow, and explore facets for quick insights into alert patterns and metrics. + +## Incidents + +Examine incidents in detail, including their associated alerts and timelines. Test correlation logic and mapping configurations that group related alerts into incidents, and validate your suppression or resolution strategies. + +## Providers + +Integrate with external data sources or alert providers like Prometheus, Datadog, or GCP Monitoring. Configure and test mappings to ensure proper ingestion and normalization of data from various sources into Keep's unified schema. + +## Workflows + +Build and test automated workflows to manage alerts and incidents with precision. Experiment with both an intuitive UI builder and advanced scripting capabilities to trigger actions, notifications, or external integrations based on dynamic conditions. + +## AIOps Techniques + +Test and refine deduplication, enrichment, mapping, and extraction rules to optimize alert handling. Experiment with these techniques to transform raw alerts into actionable data and reduce noise effectively. diff --git a/docs/overview/ruleengine.mdx b/docs/overview/ruleengine.mdx deleted file mode 100644 index 37135acfd8..0000000000 --- a/docs/overview/ruleengine.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Alert grouping" ---- - -The Keep Rule Engine is a versatile tool for grouping and consolidating alerts. 
-This guide explains the core concepts, usage, and best practices for effectively utilizing the rule engine. - -Access the Rule Engine UI through the Keep platform by navigating to the Rule Builder section. - -## Core Concepts -- **Rule definition**: A rule in Keep is a set of conditions that, when met, creates an alert group. -- **Alert attributes**: These are characteristics or data points of an alert, such as source, severity, or any attribute an alert might have. -- **Conditions and logic**: Rules are built by defining conditions based on alert attributes, using logical operators (like AND/OR) to combine multiple conditions. - -## Creating Rules -Creating a rule involves defining the conditions under which an alert should be categorized or actions should be grouped. - -1. **Accessing the Rule Engine**: Navigate to the Rule Engine section in the Keep platform. -2. **Defining rule criteria**: - - **Name the rule**: Assign a descriptive name that reflects its purpose. - - **Set conditions**: Use alert attributes to create conditions. For example, a rule might specify that an alert with a severity of 'critical' and a source of 'Prometheus' should be categorized as 'High Priority'. - - **Logical grouping**: Combine conditions using logical operators to form comprehensive rules. - -## Examples -- **Metric-based alerts**: Construct a rule to pinpoint alerts associated with specific metrics, such as high CPU usage on servers. This can be achieved by grouping alerts that share a common attribute, like a 'CPU usage' tag, ensuring you quickly identify and address performance issues. -- **Feature-related alerts**: Establish rules to organize alerts by specific features or services. For instance, you can group alerts based on a 'service' or 'URL' tag. This approach is particularly useful for tracking and managing alerts related to distinct functionalities or components within your application. 
-- **Team-based alert management**: Implement rules to categorize alerts according to team responsibilities. This might involve grouping alerts based on the systems or services a particular team oversees. Such a strategy ensures that alerts are promptly directed to the appropriate team, enhancing response times and efficiency. diff --git a/docs/overview/servicetopology.mdx b/docs/overview/servicetopology.mdx new file mode 100644 index 0000000000..0beafedf6f --- /dev/null +++ b/docs/overview/servicetopology.mdx @@ -0,0 +1,270 @@ +--- +title: "Service Topology" +--- + +The Service Topology feature in Keep provides a visual representation of your service dependencies, allowing you to quickly understand the relationships between various components in your system. By mapping services and their interactions, you can gain insights into how issues in one service may impact others, enabling faster root-cause analysis and more effective incident resolution. + + + + + +## Key Concepts + +- **Nodes**: Represent individual services, applications, or infrastructure components. +- **Edges**: Show the dependencies and interactions between nodes. + +## Supported Providers + + + + } + > + + } + > + + } + > + + } + > + + } + > + + +## Features + +### Visualizing Dependencies + +The service topology graph helps you: + +- Identify critical dependencies between services. +- Understand how failures in one service propagate through the system. +- Highlight single points of failure or bottlenecks. + +### Real-Time Health Indicators + +Nodes and edges are enriched with health indicators derived from alerts and metrics. This allows you to: + +- Quickly spot issues in your architecture. +- Prioritize incident resolution based on affected dependencies. + +### Filter and Focus + +Use filters to focus on specific parts of the topology, such as: + +- A particular environment (e.g., production, staging). +- A service group (e.g., all database-related services). 
+- Alerts of a specific severity or type.
+
+### Incident Integration
+
+Service topology integrates seamlessly with Keep’s incident management features. When an incident is triggered, you can:
+
+- View the affected nodes and their dependencies directly on the topology graph.
+- Analyze how alerts related to the incident are propagating through the system.
+- Use this information to guide remediation efforts.
+
+
+### Manually adding Topology
+
+This feature allows you to create and manipulate your services and the dependencies between them.
+
+- Click on `+ Add Node` to add a new service to your map.
+
+
+
+
+- The `Service` and `Display Name` fields are mandatory; the rest of the fields are optional. (Note: `Tags` accepts CSV)
+- Click `Save`; this adds a new service to your map.
+
+
+
+
+- You can add multiple such services and add connections/dependencies between them.
+- You can select one or more manually created services (hold Ctrl to select multiple services), and delete them all at once using the `Delete Services` option.
+
+
+
+
+- You can click any service and use the `Update Service` button to update it.
+
+
+
+
+- To add a dependency, drag from any service's right handle (source) to another service's left handle (target).
+
+
+
+
+- You can remove a dependency by dragging it away from its target handle and releasing it.
+
+
+
+
+- To add a protocol to your dependency: click the dependency > Click `Edit Dependency` > Fill in the protocol in the popup > Click `OK`.
+
+
+
+
+
+
+
+
+
+
+
+
+- You can only manipulate the services that are created manually.
+- Creating or updating a dependency is only possible between two manually created services.
+
+
+
+### Importing and Exporting topology
+
+You can Import/Export topology data: services + applications + dependencies to/from Keep using this feature.
+
+- Click the menu item to get the Import/Export option.
+
+
+
+
+- Data is Imported and Exported in YAML Format. 
+- Below is a sample YAML: +```yaml +applications: +- description: 'A sample application for monitoring and management' + id: 398e7b9a-bc0f-487a-b6d7-049a16e500e4 + name: monitoring-app + repository: 'https://github.com/sample-org/monitoring-app' + services: + - 556041 + - 556061 +dependencies: +- depends_on_service_id: 556051 + id: 6219 + protocol: HTTP + service_id: 556041 +- depends_on_service_id: 556081 + id: 6220 + protocol: HTTPS + service_id: 556051 +- depends_on_service_id: 556041 + id: 6221 + protocol: GRPC + service_id: 556061 +- depends_on_service_id: 556071 + id: 6222 + protocol: TCP + service_id: 556061 +- depends_on_service_id: 556051 + id: 6223 + protocol: UDP + service_id: 556071 +services: +- id: 556041 + display_name: Auth Service + service: PAH3VXB + category: Backend + description: 'Handles user authentication and session management' + email: 'auth-team@example.com' + environment: production + ip_address: '192.168.1.10' + is_manual: false + mac_address: '00:1A:2B:3C:4D:5E' + manufacturer: 'Dell' + namespace: 'auth' + repository: 'https://github.com/sample-org/auth-service' + slack: '#auth-alerts' + source_provider_id: ebe062c4814f483cb2c5d556fbb9395c + tags: ['authentication', 'security'] + team: 'Auth Team' +- id: 556051 + display_name: Log Aggregator + service: PFRKUOO + category: Monitoring + description: 'Main service responsible for collecting and aggregating logs' + email: 'logs-team@example.com' + environment: staging + ip_address: '192.168.1.11' + is_manual: false + mac_address: '00:1A:2B:3C:4D:5F' + manufacturer: 'HP' + namespace: 'logs' + repository: 'https://github.com/sample-org/log-aggregator' + slack: '#logs-alerts' + source_provider_id: ebe062c4814f483cb2c5d556fbb9395c + tags: ['monitoring', 'logging'] + team: 'Logs Team' +- id: 556061 + display_name: Core API + service: PWKXGRK + category: API + description: 'Main business logic service for processing user data' + email: 'backend-team@example.com' + environment: production + 
ip_address: '192.168.1.12' + is_manual: false + mac_address: '00:1A:2B:3C:4D:60' + manufacturer: 'Cisco' + namespace: 'api' + repository: 'https://github.com/sample-org/core-api' + slack: '#backend-alerts' + source_provider_id: ebe062c4814f483cb2c5d556fbb9395c + tags: ['api', 'backend'] + team: 'Backend Team' +- id: 556071 + display_name: Database Service + service: PFEIHAU + category: Storage + description: 'Handles database operations and caching' + email: 'db-team@example.com' + environment: production + ip_address: '192.168.1.13' + is_manual: false + mac_address: '00:1A:2B:3C:4D:61' + manufacturer: 'IBM' + namespace: 'db' + repository: 'https://github.com/sample-org/database-service' + slack: '#db-alerts' + source_provider_id: ebe062c4814f483cb2c5d556fbb9395c + tags: ['database', 'storage'] + team: 'Database Team' +- id: 556081 + display_name: Service Mesh + service: PC8HHE7 + category: Infrastructure + description: 'Handles networking and service discovery' + email: 'infra-team@example.com' + environment: production + ip_address: '192.168.1.14' + is_manual: false + mac_address: '00:1A:2B:3C:4D:62' + manufacturer: 'Juniper' + namespace: 'mesh' + repository: 'https://github.com/sample-org/service-mesh' + slack: '#infra-alerts' + source_provider_id: ebe062c4814f483cb2c5d556fbb9395c + tags: ['networking', 'mesh'] + team: 'Infra Team' +``` \ No newline at end of file diff --git a/docs/overview/support.mdx b/docs/overview/support.mdx new file mode 100644 index 0000000000..edc35c1944 --- /dev/null +++ b/docs/overview/support.mdx @@ -0,0 +1,16 @@ +--- +title: "Support" +sidebarTitle: Support +--- + +## Overview +You can use the following methods to ask for support/help with anything related with Keep: + + + + You can use the [Keep Slack community](https://slack.keephq.dev) to get support. + + + You can use support@keephq.dev to send inquiries. 
+ + diff --git a/docs/overview/usecases.mdx b/docs/overview/usecases.mdx index b63e425a72..1b63006da4 100644 --- a/docs/overview/usecases.mdx +++ b/docs/overview/usecases.mdx @@ -1,28 +1,61 @@ --- -title: "Use cases" +title: "Use Cases" --- -## Central Alerts management -No more navigating between multiple Prometheus instances and dealing with per-region, per-account CloudWatch settings. +Keep is a versatile platform that adapts to the needs of various roles and scenarios in IT operations. -By linking your alert-triggering tools to Keep, you gain a centralized dashboard for managing all your alerts. +Whether you're a DevOps engineer managing infrastructure, an SRE ensuring uptime, or a NOC team lead handling alert noise, Keep provides tailored solutions. +The platform also addresses a broad range of use cases, from centralizing alert management to automating responses and ensuring SLA compliance. Explore how Keep can simplify your workflows and improve operational efficiency, no matter your role or challenge. -With Keep, you can review, throttle, mute, and fine-tune all of your alerts from a single console. +--- + +## By Role + +### For DevOps +Keep enables DevOps engineers to centralize alert management, automate responses, and fine-tune alert configurations. With integrations to tools like Prometheus and Grafana, you can streamline monitoring workflows, reduce noise, and focus on delivering reliable infrastructure. + +### For SREs +Site Reliability Engineers can benefit from Keep’s ability to correlate alerts across systems, enrich them with contextual data, and automate remediation steps. Use Keep to maintain service uptime and reduce the burden of on-call duties by ensuring actionable alerts. + +### For Software Engineers +Software engineers can use Keep to understand the context of alerts that impact their services. 
By integrating alert enrichment and automated workflows, they can quickly identify and resolve issues without sifting through raw logs or multiple monitoring tools. + +### For Engineering Managers +Keep helps engineering managers track and manage the overall health of their systems. Gain insights into alert trends, manage noise reduction strategies, and ensure your teams focus on critical issues with Keep’s centralized dashboard and analytics. + +### For NOC Team Leads +Keep empowers NOC teams with advanced alert visualization, centralized management, and actionable insights. Use features like throttling, muting, and faceted search to streamline incident handling and minimize alert fatigue. -## Alerts enrichment -You're no longer constrained by the alerting mechanisms implemented in your tools. +### For Heads of IT Operations +For heads of IT operations, Keep provides an enterprise-ready yet flexible solution for managing complex environments. Gain visibility into system health, ensure compliance with SLAs, and scale your operations with Keep’s automation and alert correlation capabilities. -Want alerts to be triggered solely for your enterprise customers? No problem. -Want to include additional context not available in your current tools? Easy. +--- + +## By Use Case + +### Central Alert Management +No more navigating between multiple Prometheus instances and dealing with per-region, per-account CloudWatch settings. By linking your alert-triggering tools to Keep, you gain a centralized dashboard for managing all your alerts. Review, throttle, mute, and fine-tune alerts from a single console. + +### Alerts Enrichment +Keep allows you to enrich alerts with additional context from observability tools, databases, and ticketing systems. Need enterprise-specific alert triggers or want to include extra details about customer impact? Keep makes it easy to augment alerts for better decision-making. 
-Simply connect your observability tools, databases, ticketing systems, or any other tools that can provide additional context, and integrate them with your alerts. +### Automate Alert Response +Automate responses to common alerts, reducing the time spent on repetitive tasks. For example, confirm a 502 error on an endpoint with an additional query or check if an issue affects a low-priority customer before escalating it to your team. -## Automate the alert response process -The saying goes, "If you can automate the response to an alert, it shouldn't be an alert," right? +### Multi-Environment Monitoring +Centralize alerts across multiple environments, such as staging, production, and testing. Keep helps you manage environment-specific rules while providing a unified view of your system health. -While that might hold true in an ideal world, we understand that many times the response to an alert can be automated—whether by double-checking or taking steps to verify that the alert is not a false positive. +### Noise Reduction +Use deduplication, throttling, and muting to significantly reduce noise from excessive or redundant alerts. Keep ensures your teams are only notified of critical issues. -Consider a common scenario—you receive a 502 error on one of your endpoints. That's alert-worthy, isn't it? +### SLA Compliance +Track alert resolution times and ensure compliance with SLAs. Keep’s automation and reporting features enable you to monitor and meet contractual obligations seamlessly. -But what if you could confirm that it's a genuine error with an additional query? Or even determine if it's a free-trial user whose issue can wait until morning? +### Incident Correlation +Correlate related alerts to identify the root cause of incidents quickly. Use Keep’s workflows and mapping rules to group alerts and provide actionable insights for resolution. + +### Ticketing Integration +Sync alerts with ticketing tools like Jira and ServiceNow. 
Automate ticket creation, track updates, and ensure seamless workflows between operations and development teams. + +--- diff --git a/docs/overview/workflow-automation.mdx b/docs/overview/workflow-automation.mdx new file mode 100644 index 0000000000..729ccd323f --- /dev/null +++ b/docs/overview/workflow-automation.mdx @@ -0,0 +1,39 @@ +--- +title: "Workflows" +--- + +Workflow automation is designed to transform how you manage alerts and incidents. + +It allows you to automate responses, integrate seamlessly with your existing tools, and build complex workflows tailored to your needs. With workflow automation, you can reduce manual effort, improve response times, and ensure consistent handling of recurring scenarios. + + + + + + +This section provides an abstract overview of workflows in Keep. To dive deeper into creating and managing workflows, refer to the dedicated [Workflow Documentation](#workflow-documentation) and explore our [GitHub repository](https://github.com/keephq/keep/tree/main/examples/workflows) for ready-to-use examples. + + +## Why Workflow Automation is Core + +Every alert, incident, or integration can be part of a workflow. + +Whether it’s auto-creating tickets, sending Slack notifications, or enriching alerts with external data, workflows are central to making Keep a powerful and flexible tool for your IT operations. + +## Explore Further + +### 1. Detailed Workflow Documentation +Explore [Workflow Documentation](#workflow-documentation) to learn: +- How to define triggers, actions, and steps. +- Best practices for designing efficient workflows. +- Advanced use cases, such as conditional branching and multi-step automation. + +### 2. Workflow Examples on GitHub +Check out our [GitHub repository](https://github.com/keephq/keep/tree/main/examples/workflows) for: +- Pre-built workflows ready to use in your environment. +- Examples for common use cases, such as auto-remediation, alert enrichment, and multi-channel notifications. 
+- Contributions from the community, showcasing innovative ways to use Keep workflows. + +--- + +Workflow automation is at the heart of Keep’s mission to make AIOps accessible and actionable. Use this as a starting point, and explore the rich resources available to master workflows and revolutionize your alert management. diff --git a/docs/package-lock.json b/docs/package-lock.json index 6ab682576c..e7c2e35c87 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -1,6 +1,7 @@ { - "name": "docs", - "lockfileVersion": 3, - "requires": true, - "packages": {} -} + "name": "docs", + "lockfileVersion": 3, + "requires": true, + "packages": {} + } + \ No newline at end of file diff --git a/docs/platform/alerts.mdx b/docs/platform/alerts.mdx deleted file mode 100644 index cf9bdc9537..0000000000 --- a/docs/platform/alerts.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "Alerts" -sidebarTitle: Alerts ---- - -## Overview -You can manage Alerts programmatically using the Alerts API. -The alerts page let you manage your alerts in a single pane of glass. - - -## View your alerts - -By connecting Providers, you get a single pane of glass for your alerts: - - - - -## Pushed alerts - - - - -See all of the alerts that were pushed into Keep. - -## Pulled alerts - - - - -See all of the alerts that were pulled by Keep. - - -## Alert history -To see an alert history, just click on the history button: - - - - - -## Go to the original alert -You can see your alert in the origin tool by clicking on "Open Alert": - - - diff --git a/docs/platform/overview.mdx b/docs/platform/overview.mdx deleted file mode 100644 index 222791ec71..0000000000 --- a/docs/platform/overview.mdx +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: "Overview" -sidebarTitle: Overview ---- -Keep is fully open source. If you want to start Keep on your local environment, see the deployment section. -Keep is API first. Everything you do on the UI can be done via API. 
- -The platform is accessible on https://platform.keephq.dev and let you start the journey of improving your alerts. - -The platform is currently built on top of: - -1. [Providers](/platform/providers) - connect your stack to Keep. -2. [Alerts](/platform/alerts) - single pane of glass for your alerts. -3. [Workflows](/platform/workflows) - create automations on top of your alerts (or regardless). -4. [Workflow Builder](/platform/workflowbuilder) - a visual builder for your workflows. -5. [Settings](/platform/settings) - the settings page (add users, etc). diff --git a/docs/platform/providers.mdx b/docs/platform/providers.mdx deleted file mode 100644 index eff84979c8..0000000000 --- a/docs/platform/providers.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "Providers" -sidebarTitle: Providers ---- - -## Overview -You can manage Providers programmatically using the Providers API. -The providers page let you manage your Providers in easy way. - - - - - -## Connecting your first Provider -To connect a Provider, just click on the Provider tile. - - - -You'll need to provide: -1. Provider name. -2. Provider authentication details. -Each provider has its own authentication process. To learn about a specific Provider authentication, you can go to the Provider page. - - -## Webhook Integration -The webhook integration let you seamlessly push your alerts to Keep. -It means that when you connect the Provider, Keep hooks the Provider configuration to add itself as a destination. -After install - -A webhook integration available if it has "Webhook available" icon: - - - - -## Delete a Provider -To delete a Provider, just click on the right bottom corner of the Provider tile: - - - - diff --git a/docs/platform/settings.mdx b/docs/platform/settings.mdx deleted file mode 100644 index 6f472a9777..0000000000 --- a/docs/platform/settings.mdx +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: "Settings" -sidebarTitle: Settings ---- - -# Overview -Setup and configure Keep. 
- -## Users -Add or remove users from your tenant. - - - - - -## Webhook -View your tenant webhook settings. - - - - - -## SMTP -Configure your SMTP server to send emails. - - - - - -### Get an API Key - - - diff --git a/docs/platform/workflowbuilder.mdx b/docs/platform/workflowbuilder.mdx deleted file mode 100644 index 59fe2d7eac..0000000000 --- a/docs/platform/workflowbuilder.mdx +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: "Workflow builder" -sidebarTitle: Workflow builder ---- -Under construction diff --git a/docs/platform/workflows.mdx b/docs/platform/workflows.mdx deleted file mode 100644 index 50400340a0..0000000000 --- a/docs/platform/workflows.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: "Workflows" -sidebarTitle: Workflows ---- - -## Overview -You can manage Workflows programmatically using the Workflow API. -The Workflows page implements a convinent interface to manage your workflows. - -Workflows can be triggered by: - -1. Alert -2. Predefined interval -3. Manually - -You can learn more about how to [create workflows](/workflows/getting-started) and about the [workflows syntax](/workflows/syntax) in the workflows docs or in the [examples](https://github.com/keephq/keep/tree/main/examples/workflows) section in Keep repository. - -## Upload a Workflow -Drop your workflow yaml file or use the "upload workflow": - - - - -## Connect a missing Provider -When you upload a Workflow, Keep parse it and checks what Providers this workflow needs in order to run. -If the provider is not installed, the Workflow won't be able to run. 
To install the missing Provider, just click on the "install" link: - - - - - -You will be taken to the specific Provider installation page: - - - - -## Run a Workflow manually -If a Workflow was defined with the manual trigger, the "run manually" button will be enabled: - - - - - - -## Delete a Workflow -To delete a Workflow, just click on the "trash icon": - - - - - -## View Workflow executions history - - - - -## View a specific Workflow execution logs - - - diff --git a/docs/providers/adding-a-new-provider.mdx b/docs/providers/adding-a-new-provider.mdx index 2fccc2dc28..87ae95826d 100644 --- a/docs/providers/adding-a-new-provider.mdx +++ b/docs/providers/adding-a-new-provider.mdx @@ -2,26 +2,290 @@ title: "Adding a new Provider" sidebarTitle: "Adding a New Provider" --- -Under contstruction - -### Basics - -- BaseProvider is the base class every provider needs to inherit from -- BaseProvider exposes 4 important functions: - - `query(self, **kwargs: dict)` which is used to query the provider in steps - - `notify(self, **kwargs: dict)` which is used to notify via the provider in actions - - `dispose(self)` which is used to dispose the provider after usage (e.g. close the connection to the DB) - - `validate_config(self)` which is used to validate the configuration passed to the Provider -- And 4 functions that are not required: - - `get_alerts(self)` which is used to fetch configured alerts (**not the currently active alerts**) - - `deploy_alert(self, alert: dict, alert_id: Optional[str]` which is used to deploy an alert to the provider - - `get_alert_schema(self)` which is used to describe the provider's API schema of how to deploy alert - - `get_logs(self, limit)` which is used to fetch logs from the provider (currently used by the AI layer to generate more accurate results) -- Providers must be located in the providers directory -- Provider directory must start with the provider's unique identifier followed by underscore+provider (e.g. 
`slack_provider`) -- Provider file name must start with the provider's unique identifier followed by underscore+provider+.py (e.g. `slack_provider.py`) - -### ProviderScope + +This guide explains how to create a new provider for Keep. Providers are integrations that allow Keep to interact with external services for alerting, querying data, managing incidents, or building topology maps. + +## Table of contents +- [Provider structure](#provider-structure) +- [Step-by-step implementation](#step-by-step-implementation) +- [Provider attributes](#provider-attributes) +- [Abstract methods](#abstract-methods) +- [Provider types and capabilities](#provider-types-and-capabilities) +- [Authentication configuration](#authentication-configuration) +- [Testing your provider](#testing-your-provider) +- [Best practices](#best-practices) +- [Common patterns](#common-patterns) +- [Complete provider example](#complete-provider-example) +- [Checklist](#checklist) + +## Provider structure + +Each provider in Keep follows a specific structure: + +``` +keep/providers/ +├── yourservice_provider/ +│ ├── __init__.py +│ └── yourservice_provider.py +``` + +**Important Notes:** +- Keep's ProvidersFactory automatically discovers providers based on the directory naming convention (`*_provider`). +- You don't need to register them explicitly - just follow the naming pattern. +- The provider type is automatically extracted from the class name (for example, `ServiceNowProvider` → `servicenow`). + +## Step-by-step implementation + +### 1. Create provider directory + +Create a new directory under `keep/providers/` with the pattern `{service}_provider`: + +```bash +mkdir keep/providers/yourservice_provider +``` + +### 2. Create the provider module + +Create `yourservice_provider.py` with the following structure: + +```python +""" +YourService Provider is a class that allows integration with YourService. 
+""" + +import dataclasses +import json +import os +from typing import Optional, List, Dict, Any + +import pydantic +import requests + +from keep.api.models.alert import AlertDto, AlertSeverity, AlertStatus +from keep.contextmanager.contextmanager import ContextManager +from keep.providers.base.base_provider import BaseProvider +from keep.providers.models.provider_config import ProviderConfig, ProviderScope +from keep.providers.models.provider_method import ProviderMethod + + +@pydantic.dataclasses.dataclass +class YourserviceProviderAuthConfig: + """YourService authentication configuration.""" + + api_endpoint: str = dataclasses.field( + metadata={ + "required": True, + "description": "YourService API endpoint URL", + "validation": "https_url", # Optional: validates HTTPS URLs + } + ) + + api_key: str = dataclasses.field( + metadata={ + "required": True, + "description": "API key for YourService", + "sensitive": True, # Marks field as sensitive in UI + } + ) + + region: str = dataclasses.field( + default="us-east-1", + metadata={ + "required": False, + "description": "YourService region", + "type": "select", + "options": ["us-east-1", "eu-west-1", "ap-south-1"], + } + ) + + +class YourserviceProvider(BaseProvider): + """Send alerts and fetch data from YourService.""" + + # Required: Display name shown in UI + PROVIDER_DISPLAY_NAME = "YourService" + + # Required: Categories for provider classification + PROVIDER_CATEGORY = ["Monitoring"] + + # Optional: Tags for searchability + PROVIDER_TAGS = ["alert", "data"] + + # Optional: Define required scopes/permissions + PROVIDER_SCOPES = [ + ProviderScope( + name="read:alerts", + description="Read alerts from YourService", + mandatory=True, + documentation_url="https://docs.yourservice.com/permissions", + alias="Read Alerts", + ), + ProviderScope( + name="write:alerts", + description="Create and update alerts", + mandatory=False, + mandatory_for_webhook=True, # Required only for webhook setup + ), + ] + + # Optional: 
OAuth2 URL (MUST be set as class attribute, not in __init__) + OAUTH2_URL = None # Or os.environ.get("YOURSERVICE_OAUTH2_URL") + + def __init__( + self, context_manager: ContextManager, provider_id: str, config: ProviderConfig + ): + super().__init__(context_manager, provider_id, config) + # Initialize any client libraries or state here + # Note: Logger is automatically available as self.logger + + # Context manager provides access to: + # - self.context_manager.tenant_id: Current tenant ID + # - self.context_manager.workflow_id: Current workflow ID + # - self.context_manager.workflow_execution_id: Current execution ID + # - self.context_manager.get_full_context(): Full workflow context + + def validate_config(self): + """ + Validates required configuration for YourService provider. + + This is an abstract method that MUST be implemented. + """ + self.authentication_config = YourserviceProviderAuthConfig( + **self.config.authentication + ) + + def dispose(self): + """ + Cleanup any resources when provider is disposed. + + This is an abstract method that MUST be implemented, even if it just passes. + """ + pass +``` + +### 3. Create the __init__.py File + +Create `keep/providers/yourservice_provider/__init__.py`: + +```python +from keep.providers.yourservice_provider.yourservice_provider import ( + YourserviceProvider, + YourserviceProviderAuthConfig +) + +__all__ = ["YourserviceProvider", "YourserviceProviderAuthConfig"] +``` + +### 4. Add provider documentation + +Create `docs/providers/documentation/yourservice-provider.mdx` following the documentation template. + + +Provider configuration fields are automatically documented through auto-generated snippets. Keep generates the snippet files in `docs/snippets/providers/` from the provider's AuthConfig metadata and includes them in the documentation automatically. + + +## Provider architecture + +### Abstract methods + +Every provider must implement these two abstract methods from BaseProvider: + +1. 
**`validate_config(self)`** - Validates and processes the provider configuration +2. **`dispose(self)`** - Clean up resources when the provider is disposed of + +### Provider capabilities + +Providers expose capabilities through standard methods: + +- **`_notify(**kwargs)`** - Send notifications or alerts +- **`_query(**kwargs)`** - Query data from the provider +- **`_get_alerts()`** - Fetch alerts for monitoring +- **`setup_webhook(...)`** - Configure webhook endpoints +- **`validate_scopes()`** - Check provider permissions +- **`expose()`** - Return parameters calculated during execution for use in workflows + + +The public methods `notify()` and `query()` wrap the private implementations (`_notify()` and `_query()`) with additional capabilities like enrichment and error handling. Always implement the private methods. + + +### Provider discovery + +Keep automatically discovers providers based on naming conventions: + +- Location: `keep/providers/` directory +- Directory naming: Must end with `_provider` (for example, `slack_provider`) +- Main file: Must match directory name with `.py` extension (for example, `slack_provider.py`) +- No explicit registration needed - just follow the naming convention + +### Implementation examples + +#### Validate_config() +```python +def validate_config(self): + """Validate and process provider configuration.""" + self.authentication_config = YourserviceProviderAuthConfig( + **self.config.authentication + ) +``` + +#### Dispose() +```python +def dispose(self): + """Cleanup any resources.""" + # Close connections, cleanup clients, etc. + # Can just pass if no cleanup needed + pass +``` + +### Provider type extraction + +The provider type is automatically extracted from your class name: +- `YourserviceProvider` → `yourservice` +- `ServiceNowProvider` → `service.now` +- `DatadogProvider` → `datadog` + +This happens via the `_extract_type()` method in BaseProvider. 
+ +### Provider attributes + +Providers should define the following class attributes: + +- `PROVIDER_DISPLAY_NAME`: String used for UI display (for example, "Slack") +- `PROVIDER_CATEGORY`: List of categories from the allowed values (see Provider Categories section) +- `PROVIDER_COMING_SOON`: Boolean flag to mark providers as not ready (default: False) +- `WEBHOOK_INSTALLATION_REQUIRED`: Boolean to make webhook setup mandatory in UI (default: False) +- `PROVIDER_TAGS`: List of tags describing provider capabilities (for example, ["alert", "messaging"]) +- `PROVIDER_SCOPES`: List of ProviderScope objects defining required permissions +- `PROVIDER_METHODS`: List of ProviderMethod objects for additional capabilities (see [Provider Methods](/providers/provider-methods)) +- `FINGERPRINT_FIELDS`: List of field names used to calculate alert fingerprints +- `OAUTH2_URL`: OAuth 2.0 authorization URL if provider supports OAuth 2.0 authentication + +### Provider categories + +Providers must specify one or more categories from the following list: + +```python +PROVIDER_CATEGORY: list[Literal[ + "AI", "Monitoring", "Incident Management", "Cloud Infrastructure", + "Ticketing", "Identity", "Developer Tools", "Database", + "Identity and Access Management", "Security", "Collaboration", + "Organizational Tools", "CRM", "Queues", "Orchestration", "Others" +]] +``` + +### Provider tags + +Valid options for `PROVIDER_TAGS`: +- `"alert"` - Provider handles alerts +- `"ticketing"` - Provider manages tickets +- `"messaging"` - Provider sends messages +- `"data"` - Provider queries data +- `"queue"` - Provider manages queues +- `"topology"` - Provider provides topology data +- `"incident"` - Provider manages incidents + +### Provider scope + ```python @dataclass class ProviderScope: @@ -45,7 +309,7 @@ class ProviderScope: alias: Optional[str] = None ``` -### ProviderConfig +### Provider config ```python @dataclass @@ -74,7 +338,7 @@ class ProviderConfig: self.authentication[key] = 
chevron.render(value, {"env": os.environ}) ``` -### BaseProvider +### Base provider ```python """ @@ -86,16 +350,21 @@ class BaseProvider(metaclass=abc.ABCMeta): PROVIDER_METHODS: list[ProviderMethod] = [] FINGERPRINT_FIELDS: list[str] = [] PROVIDER_TAGS: list[ - Literal["alert", "ticketing", "messaging", "data", "queue"] + Literal["alert", "ticketing", "messaging", "data", "queue", "topology", "incident"] ] = [] + PROVIDER_DISPLAY_NAME: str = None + PROVIDER_CATEGORY: list[str] = [] + PROVIDER_COMING_SOON: bool = False + WEBHOOK_INSTALLATION_REQUIRED: bool = False def __init__( self, context_manager: ContextManager, provider_id: str, config: ProviderConfig, - webhooke_template: Optional[str] = None, + webhook_template: Optional[str] = None, webhook_description: Optional[str] = None, + webhook_markdown: Optional[str] = None, provider_description: Optional[str] = None, ): """ @@ -108,7 +377,7 @@ class BaseProvider(metaclass=abc.ABCMeta): self.provider_id = provider_id self.config = config - self.webhooke_template = webhooke_template + self.webhook_template = webhook_template self.webhook_description = webhook_description self.provider_description = provider_description self.context_manager = context_manager @@ -144,7 +413,7 @@ class BaseProvider(metaclass=abc.ABCMeta): raise NotImplementedError("dispose() method not implemented") @abc.abstractmethod - def validate_config(): + def validate_config(self): """ Validate provider configuration. """ @@ -174,13 +443,20 @@ class BaseProvider(metaclass=abc.ABCMeta): if not enrich_alert or not results: return results if results else None - self._enrich_alert(enrich_alert, results) + self._enrich(enrich_alert, results) return results - def _enrich_alert(self, enrichments, results): + def _enrich(self, enrichments, results, audit_enabled=True): """ - Enrich alert with provider specific data. - + Enrich alert or incident with provider specific data. 
+ + This method replaces the deprecated _enrich_alert method and supports both + alert and incident enrichment. + + Args: + enrichments: List of enrichment configurations + results: Results from the provider action + audit_enabled: Whether to audit the enrichment operation (default: True) """ self.logger.debug("Extracting the fingerprint from the alert") if "fingerprint" in results: @@ -192,7 +468,7 @@ class BaseProvider(metaclass=abc.ABCMeta): ) # else, if we are in an event context, use the event fingerprint elif self.context_manager.event_context: - # TODO: map all casses event_context is dict and update them to the DTO + # TODO: map all cases event_context is dict and update them to the DTO # and remove this if statement if isinstance(self.context_manager.event_context, dict): fingerprint = self.context_manager.event_context.get("fingerprint") @@ -272,12 +548,24 @@ class BaseProvider(metaclass=abc.ABCMeta): enrich_alert = kwargs.get("enrich_alert", []) if enrich_alert: - self._enrich_alert(enrich_alert, results) + self._enrich(enrich_alert, results) # and return the results return results @staticmethod - def _format_alert(event: dict) -> AlertDto | list[AlertDto]: + def _format_alert( + event: dict | list[dict], provider_instance: "BaseProvider" = None + ) -> AlertDto | list[AlertDto]: + """ + Format incoming event(s) into AlertDto object(s). + + Args: + event: Single event dict or list of event dicts + provider_instance: Optional provider instance for context + + Returns: + AlertDto or list of AlertDto objects + """ raise NotImplementedError("format_alert() method not implemented") @classmethod @@ -402,18 +690,21 @@ class BaseProvider(metaclass=abc.ABCMeta): def setup_webhook( self, tenant_id: str, keep_api_url: str, api_key: str, setup_alerts: bool = True - ): + ) -> dict | None: """ Setup a webhook for the provider. 
Args: - tenant_id (str): _description_ - keep_api_url (str): _description_ - api_key (str): _description_ - setup_alerts (bool, optional): _description_. Defaults to True. + tenant_id (str): The tenant ID + keep_api_url (str): The Keep API URL for webhook callbacks + api_key (str): The API key for authentication + setup_alerts (bool, optional): Whether to setup alerts. Defaults to True. + Returns: + dict | None: Dictionary of secrets to be saved if any, None otherwise + Raises: - NotImplementedError: _description_ + NotImplementedError: If not implemented by the provider """ raise NotImplementedError("setup_webhook() method not implemented") @@ -442,23 +733,25 @@ class BaseProvider(metaclass=abc.ABCMeta): raise NotImplementedError("oauth2_logic() method not implemented") @staticmethod - def parse_event_raw_body(raw_body: bytes) -> bytes: + def parse_event_raw_body(raw_body: bytes | dict) -> dict: """ - Parse the raw body of an event and create an ingestable dict from it. + Parse the raw body of an event and create an ingestible dict from it. For instance, in parseable, the "event" is just a string > b'Alert: Server side error triggered on teststream1\nMessage: server reporting status as 500\nFailing Condition: status column equal to abcd, 2 times' and we want to return an object - > b"{'alert': 'Server side error triggered on teststream1', 'message': 'server reporting status as 500', 'failing_condition': 'status column equal to abcd, 2 times'}" + > {'alert': 'Server side error triggered on teststream1', 'message': 'server reporting status as 500', 'failing_condition': 'status column equal to abcd, 2 times'} - If this method is not implemented for a provider, just return the raw body. + If this method is not implemented for a provider, it should convert the raw body to a dict. 
Args: - raw_body (bytes): The raw body of the incoming event (/event endpoint in alerts.py) + raw_body (bytes | dict): The raw body of the incoming event (can be bytes or dict) Returns: - dict: Ingestable event + dict: Ingestible event dictionary """ + if isinstance(raw_body, dict): + return raw_body return raw_body def get_logs(self, limit: int = 5) -> list: @@ -573,3 +866,785 @@ class BaseProvider(metaclass=abc.ABCMeta): f"Failed to push alert to {self.provider_id}: {response.content}" ) ``` + +## Provider types and capabilities + +### Base provider types + +Keep supports several base provider types, each with specific capabilities: + +1. **BaseProvider** (`keep/providers/base/base_provider.py`) + - Basic provider capabilities + - Methods: `_notify()`, `_query()`, `_get_alerts()` + - Use for: General integrations + +2. **BaseTopologyProvider** (`keep/providers/base/base_provider.py`) + - Extends BaseProvider + - Methods: `pull_topology()` + - Use for: Services that provide infrastructure topology data + - Example: Datadog Provider (`keep/providers/datadog_provider/datadog_provider.py`) + +3. **BaseIncidentProvider** (`keep/providers/base/base_provider.py`) + - Extends BaseProvider + - Methods: `_get_incidents()`, `_format_incident()` (static), `format_incident()` (classmethod), `setup_incident_webhook()` + - Use for: Incident management systems + - Example: PagerDuty Provider (`keep/providers/pagerduty_provider/pagerduty_provider.py`) + +### Common capabilities + +#### 1. Notification (`_notify`) +Send alerts or messages to external services: +```python +def _notify(self, title: str, description: str = "", **kwargs) -> dict: + # Implementation +``` + +#### 2. Query (`_query`) +Fetch data from external services: +```python +def _query(self, query: str, **kwargs) -> list: + # Implementation +``` + +#### 3. Alert Fetching (`_get_alerts`) +Pull alerts for monitoring: +```python +def _get_alerts(self) -> List[AlertDto]: + # Implementation +``` + +#### 4. 
Webhook support +Handle incoming webhooks: +```python +@staticmethod +def parse_event_raw_body(raw_body: bytes | str) -> dict: + # Parse webhook payload + +@staticmethod +def _format_alert(event: dict, provider_instance: "BaseProvider" = None) -> AlertDto | list[AlertDto]: + # Format webhook events into alerts +``` + +#### 5. OAuth 2.0 support +Handle OAuth 2.0 authentication: +```python +# IMPORTANT: Define OAUTH2_URL as a class attribute at the class level, NOT in __init__ +class YourserviceProvider(BaseProvider): + OAUTH2_URL = os.environ.get("YOURSERVICE_OAUTH2_URL") # Must be at class level + +@staticmethod +def oauth2_logic(**payload) -> dict: + # OAuth 2.0 implementation +``` + +#### 6. Consumer providers +For providers that consume messages from queues or streams: +```python +def start_consume(self): + """ + Start consuming messages from the provider. + + This method is called when Keep starts the provider as a consumer. + Implement long-running consumption logic here. + """ + # Example: Kafka consumer + while True: + message = self.consumer.poll() + if message: + self._push_alert(message) + +@property +def is_consumer(self) -> bool: + """Provider is automatically detected as consumer if start_consume is implemented.""" + return True # Automatically set if start_consume is overridden + +def status(self) -> dict: + """Return the status of the consumer.""" + return { + "status": "running" if self.consumer_active else "stopped", + "error": self.last_error if hasattr(self, 'last_error') else "" + } +``` + +### Specialized base classes + +Keep provides specialized base classes for specific provider types: + +#### Base topology provider + +For providers that manage infrastructure topology and service dependencies: + +```python +from keep.providers.base.base_topology_provider import BaseTopologyProvider + +class MyTopologyProvider(BaseTopologyProvider): + def pull_topology(self) -> tuple[list[TopologyServiceInDto], dict]: + """ + Pull topology data from the 
provider. + + Returns: + tuple: A tuple of (services list, edges dict) + """ + # Implement topology fetching logic + pass +``` + +#### BaseIncidentProvider + +For providers that manage incidents and incident response: + +```python +from keep.providers.base.base_incident_provider import BaseIncidentProvider + +class MyIncidentProvider(BaseIncidentProvider): + def _get_incidents(self) -> list[IncidentDto]: + """ + Fetch incidents from the provider (abstract method). + + Returns: + list[IncidentDto]: List of incidents + """ + # Implement incident fetching logic + pass + + @staticmethod + def _format_incident( + event: dict, provider_instance: "BaseProvider" = None + ) -> IncidentDto | list[IncidentDto]: + """ + Format raw incident data into IncidentDto objects. + + Args: + event: Raw incident data from webhook or API + provider_instance: Optional provider instance for context + + Returns: + IncidentDto or list of IncidentDto objects + """ + # Implement incident formatting logic + pass + + def setup_incident_webhook( + self, + tenant_id: str, + keep_api_url: str, + api_key: str, + setup_alerts: bool = True, + ) -> dict | None: + """ + Setup webhook for incident updates. + + Args: + tenant_id: Tenant identifier + keep_api_url: Keep API URL for callbacks + api_key: API key for authentication + setup_alerts: Whether to also setup alert webhooks + + Returns: + dict | None: Secrets to save if any + """ + # Implement webhook setup logic + pass +``` + +Note: The `get_incidents()` method is automatically provided by the base class and wraps `_get_incidents()`. The `format_incident()` class method handles provider loading and calls `_format_incident()`. 
+ +### Authentication configuration + +Providers should define an authentication configuration class as a dataclass with proper field types and validation: + +```python +import dataclasses +import pydantic +from keep.validation.fields import HttpsUrl, NoSchemeUrl, UrlPort + +@pydantic.dataclasses.dataclass +class MyProviderAuthConfig: + """Configuration for MyProvider authentication.""" + + api_key: str = dataclasses.field( + metadata={ + "required": True, + "description": "API Key for authentication", + "sensitive": True, # Masks the field value in UI + } + ) + + api_url: HttpsUrl = dataclasses.field( + default="https://api.example.com", + metadata={ + "required": False, + "description": "API endpoint URL (HTTPS only)", + "documentation_url": "https://docs.example.com/api", + "validation": "https_url", # Maps to HttpsUrl validator + } + ) + + host: NoSchemeUrl = dataclasses.field( + metadata={ + "required": True, + "description": "Service hostname", + "hint": "example.com or 192.168.1.1", + "validation": "no_scheme_url", # Maps to NoSchemeUrl validator + } + ) + + port: UrlPort = dataclasses.field( + default=443, + metadata={ + "required": False, + "description": "Service port", + "validation": "port", # Validates port range 1-65535 + } + ) + + workspace_id: str = dataclasses.field( + metadata={ + "required": True, + "description": "Workspace identifier", + "hint": "Can be found in Settings > Workspace", + } + ) + + region: str = dataclasses.field( + default="us-east-1", + metadata={ + "required": False, + "description": "Service region", + "type": "select", # Renders as dropdown in UI + "options": ["us-east-1", "eu-west-1", "ap-south-1"], + } + ) +``` + +#### Field validation + +Keep provides built-in field validation through custom Pydantic field types: + +| Validation Type | Field Type | Description | Example | +|----------------|------------|-------------|---------| +| `"https_url"` | `HttpsUrl` | Validates HTTPS URLs only | `https://api.example.com` | +| 
`"any_http_url"` | `pydantic.AnyHttpUrl` | Validates any HTTP/HTTPS URL | `http://example.com` | +| `"no_scheme_url"` | `NoSchemeUrl` | Validates URLs without scheme | `example.com:8080` | +| `"port"` | `UrlPort` | Validates port numbers (1-65535) | `443` | +| `"multihost_url"` | `MultiHostUrl` | Validates multi-host URLs | `mongodb://host1:27017,host2:27017` | +| `"no_scheme_multihost_url"` | `NoSchemeMultiHostUrl` | Multi-host URLs without scheme | `host1:9092,host2:9092` | + +To use validation: +1. Import the appropriate field type from `keep.validation.fields` +2. Use it as the field type annotation +3. Add the corresponding validation string in metadata + +Example implementations: + +```python +# HTTPS-only webhook URL +webhook_url: HttpsUrl = dataclasses.field( + metadata={ + "required": True, + "description": "Webhook endpoint (HTTPS required)", + "sensitive": True, + "validation": "https_url", + } +) + +# Database connection with multiple hosts +connection_string: MultiHostUrl = dataclasses.field( + metadata={ + "required": True, + "description": "Database connection string", + "hint": "mongodb://host1:27017,host2:27017/dbname", + "validation": "multihost_url", + } +) + +# SSH connection +ssh_host: NoSchemeUrl = dataclasses.field( + metadata={ + "required": True, + "description": "SSH hostname or IP", + "validation": "no_scheme_url", + } +) + +ssh_port: UrlPort = dataclasses.field( + default=22, + metadata={ + "required": False, + "description": "SSH port", + "validation": "port", + } +) +``` + +#### Metadata fields reference + +- `required`: Whether the field is mandatory +- `description`: Field description shown in UI +- `sensitive`: Whether to mask the field value (for secrets) +- `hidden`: Whether to hide the field in UI +- `documentation_url`: Link to relevant documentation +- `hint`: Help text for users +- `validation`: Validation type string (see preceding table) +- `type`: UI input type (for example, "select" for dropdown) +- `options`: List of 
valid options for select fields +- `config_main_group`: Group name for organizing fields in UI +- `config_sub_group`: Sub-group name for nested organization + + +The validation system ensures that configuration values are valid before Keep instantiates the provider. Invalid values are rejected with clear error messages, improving the user experience and preventing runtime errors. + + +## Testing your provider + +### 1. Unit test + +Create `tests/test_yourservice_provider.py`: + +```python +import pytest +from keep.providers.yourservice_provider.yourservice_provider import YourserviceProvider +from keep.providers.models.provider_config import ProviderConfig +from keep.contextmanager.contextmanager import ContextManager + + +def test_yourservice_provider_init(): + """Test provider initialization.""" + config = ProviderConfig( + authentication={ + "api_endpoint": "https://api.yourservice.com", + "api_key": "test-key", + } + ) + + context_manager = ContextManager(tenant_id="test", workflow_id="test") + provider = YourserviceProvider( + context_manager=context_manager, + provider_id="test", + config=config + ) + + assert provider.authentication_config.api_endpoint == "https://api.yourservice.com" + assert provider.authentication_config.api_key == "test-key" + + +@pytest.fixture +def mock_requests(monkeypatch): + """Mock requests module.""" + import requests + class MockResponse: + def __init__(self, json_data, status_code=200): + self.json_data = json_data + self.status_code = status_code + + def json(self): + return self.json_data + + def raise_for_status(self): + pass + + def mock_post(*args, **kwargs): + return MockResponse({"success": True}) + + def mock_get(*args, **kwargs): + return MockResponse({"alerts": []}) + + monkeypatch.setattr(requests, "post", mock_post) + monkeypatch.setattr(requests, "get", mock_get) + + +def test_yourservice_notify(mock_requests): + """Test notification sending.""" + config = ProviderConfig( + authentication={ + "api_endpoint": 
"https://api.yourservice.com", + "api_key": "test-key", + } + ) + + context_manager = ContextManager(tenant_id="test", workflow_id="test") + provider = YourserviceProvider( + context_manager=context_manager, + provider_id="test", + config=config + ) + + result = provider.notify(message="Test message") + assert result["success"] is True +``` + +### 2. Integration test + +Test with the provider factory: + +```python +def test_provider_factory_loading(): + """Test that provider loads correctly through factory.""" + from keep.providers.providers_factory import ProvidersFactory + + # Get provider class + provider_class = ProvidersFactory.get_provider_class("yourservice") + assert provider_class.__name__ == "YourserviceProvider" + + # Get all providers + all_providers = ProvidersFactory.get_all_providers() + yourservice = next((p for p in all_providers if p.type == "yourservice"), None) + assert yourservice is not None + assert yourservice.display_name == "YourService" +``` + +### 3. Manual testing + +You can test your provider by running it directly: +```bash +cd keep +python -m keep.providers.yourservice_provider.yourservice_provider +``` + +The `if __name__ == "__main__":` block allows you to test provider initialization and basic capabilities. 
+ +Add a test block to your provider for direct execution: + +```python +if __name__ == "__main__": + # Test the provider directly + import logging + + logging.basicConfig(level=logging.DEBUG, handlers=[logging.StreamHandler()]) + context_manager = ContextManager( + tenant_id="singletenant", + workflow_id="test", + ) + + # Initialize the provider with test config + config = ProviderConfig( + authentication={ + "api_endpoint": "https://api.yourservice.com", + "api_key": "test-key", + } + ) + + provider = YourserviceProvider( + context_manager=context_manager, + provider_id="test", + config=config + ) + + # Test provider methods + print("Provider initialized successfully!") + + # Test specific functionality + try: + result = provider._query("test query") + print(f"Query result: {result}") + except Exception as e: + print(f"Query failed: {e}") +``` + +## Best practices + +### 1. Error handling + +Always handle API errors gracefully: + +```python +from keep.exceptions.provider_exception import ProviderException + +try: + response = requests.get(url) + response.raise_for_status() +except requests.exceptions.RequestException as e: + raise ProviderException(f"Failed to fetch data: {str(e)}") +``` + +### 2. Logging + +Use the provider's logger: + +```python +self.logger.info("Fetching alerts from YourService") +self.logger.error(f"Failed to connect: {str(e)}") +``` + +### 3. Configuration validation + +Validate configuration in `validate_config()`: + +```python +def validate_config(self): + self.authentication_config = YourserviceProviderAuthConfig( + **self.config.authentication + ) + + # Additional validation + if not self.authentication_config.api_endpoint.startswith("https://"): + raise ValueError("API endpoint must use HTTPS") +``` + +### 4. 
Alert formatting + +When returning alerts, use Keep's standard format: + +```python +from keep.api.models.alert import AlertDto, AlertSeverity, AlertStatus + +alert = AlertDto( + id="unique-alert-id", + name="Alert Title", + description="Detailed description", + severity=AlertSeverity.HIGH, + status=AlertStatus.FIRING, + lastReceived=datetime.now().isoformat(), + source=["yourservice"], + fingerprint="unique-fingerprint", + labels={"key": "value"}, + annotations={"runbook": "https://docs.example.com"}, +) +``` + +### 5. Secrets management + +Never hardcode secrets. Use environment variables or configuration: + +```python +client_id = os.environ.get("YOURSERVICE_CLIENT_ID") +if not client_id: + raise ProviderException("YOURSERVICE_CLIENT_ID environment variable not set") +``` + +## Common patterns + +### 1. Provider health checks + +Implement health monitoring using the `ProviderHealthMixin`: + +```python +from keep.providers.base.base_provider import BaseProvider, ProviderHealthMixin + +class YourserviceProvider(BaseProvider, ProviderHealthMixin): + HAS_HEALTH_CHECK = True + + # The mixin provides automatic health checking for: + # - Topology coverage validation + # - Spammy alerts detection + # - Alerting rule usage monitoring +``` + + +The health check mixin is particularly useful for monitoring providers that collect topology data or handle high volumes of alerts. + + +### 2. Pagination + +Handle paginated API responses: + +```python +def _get_all_items(self): + items = [] + page = 1 + + while True: + response = self._query_page(page) + items.extend(response["items"]) + + if not response.get("has_next"): + break + page += 1 + + return items +``` + +### 3. 
Rate limiting + +Respect API rate limits: + +```python +import time +from typing import Any + +def _rate_limited_request(self, url: str, **kwargs) -> Any: + max_retries = 3 + + for attempt in range(max_retries): + try: + response = requests.get(url, **kwargs) + if response.status_code == 429: # Rate limited + retry_after = int(response.headers.get("Retry-After", 60)) + self.logger.warning(f"Rate limited, waiting {retry_after}s") + time.sleep(retry_after) + continue + response.raise_for_status() + return response.json() + except Exception as e: + if attempt == max_retries - 1: + raise + time.sleep(2 ** attempt) # Exponential backoff +``` + +### 4. Caching + +Cache frequently accessed data: + +```python +from datetime import datetime, timedelta + +class YourserviceProvider(BaseProvider): + def __init__(self, context_manager, provider_id, config): + super().__init__(context_manager, provider_id, config) + self._cache = {} + self._cache_ttl = timedelta(minutes=5) + + def _get_cached_data(self, key: str) -> Any: + if key in self._cache: + data, timestamp = self._cache[key] + if datetime.now() - timestamp < self._cache_ttl: + return data + return None + + def _set_cached_data(self, key: str, data: Any): + self._cache[key] = (data, datetime.now()) +``` + +### 5. Webhook signature verification + +Verify webhook authenticity: + +```python +import hmac +import hashlib + +@staticmethod +def verify_webhook_signature(raw_body: bytes, signature: str, secret: str) -> bool: + expected = hmac.new( + secret.encode(), + raw_body, + hashlib.sha256 + ).hexdigest() + return hmac.compare_digest(expected, signature) +``` + +### 6. 
Exposing runtime parameters + +Use the `expose()` method to make runtime-calculated values available to workflows: + +```python +class YourserviceProvider(BaseProvider): + def __init__(self, context_manager, provider_id, config): + super().__init__(context_manager, provider_id, config) + self._from_timestamp = None + self._to_timestamp = None + + def _query(self, metric: str, from_time: str = "1h", **kwargs): + # Calculate actual timestamps + self._to_timestamp = datetime.now() + self._from_timestamp = self._to_timestamp - parse_duration(from_time) + + # Query with calculated timestamps + return self._fetch_metrics(metric, self._from_timestamp, self._to_timestamp) + + def expose(self): + """Expose calculated parameters for workflow use.""" + exposed = {} + if self._from_timestamp: + exposed["from"] = self._from_timestamp.isoformat() + if self._to_timestamp: + exposed["to"] = self._to_timestamp.isoformat() + return exposed +``` + +This allows workflows to access the actual timestamps used in queries, not just the relative time strings. 
+ +## Complete provider example + +Here's a minimal example of a complete provider implementation: + +```python +from typing import Optional + +from keep.providers.base.base_provider import BaseProvider +from keep.providers.models.provider_config import ProviderConfig +from keep.contextmanager.contextmanager import ContextManager + +class MyProvider(BaseProvider): + PROVIDER_DISPLAY_NAME = "My Service" + PROVIDER_CATEGORY = ["Monitoring", "Incident Management"] + PROVIDER_TAGS = ["alert", "messaging"] + + def __init__( + self, + context_manager: ContextManager, + provider_id: str, + config: ProviderConfig, + webhook_template: Optional[str] = None, + webhook_description: Optional[str] = None, + webhook_markdown: Optional[str] = None, + provider_description: Optional[str] = None, + ): + super().__init__( + context_manager, provider_id, config, + webhook_template, webhook_description, + webhook_markdown, provider_description + ) + + def validate_config(self): + # Validate the provider configuration + pass + + def dispose(self): + # Clean up resources + pass + + def _query(self, **kwargs): + # Implement query logic + pass + + def _notify(self, **kwargs): + # Implement notification logic + pass +``` + +## File references + +- **Base Provider Classes**: `keep/providers/base/base_provider.py` +- **Provider Models**: `keep/providers/models/` +- **Provider Factory**: `keep/providers/providers_factory.py` +- **Provider Exceptions**: `keep/exceptions/provider_exception.py` +- **Example Providers**: + - Simple: `keep/providers/slack_provider/slack_provider.py` + - Complex: `keep/providers/datadog_provider/datadog_provider.py` + - Database: `keep/providers/clickhouse_provider/clickhouse_provider.py` + - Incident: `keep/providers/pagerduty_provider/pagerduty_provider.py` + - Topology: `keep/providers/datadog_provider/datadog_provider.py` +- **Tests**: `tests/test_*_provider.py` +- **Documentation**: `docs/providers/documentation/` +- **Additional Docs**: + - `docs/providers/adding-a-new-provider.mdx` + - 
`docs/providers/provider-methods.mdx` + - `docs/providers/linked-providers.mdx` + +## Checklist + +- [ ] Create provider directory and files +- [ ] Implement AuthConfig class with proper metadata +- [ ] Implement provider class with required methods +- [ ] Add provider to `__init__.py` +- [ ] Set appropriate PROVIDER_DISPLAY_NAME, PROVIDER_CATEGORY, and PROVIDER_TAGS +- [ ] Implement `validate_config()` and `dispose()` +- [ ] Add at least one capability (`_notify`, `_query`, or `_get_alerts`) +- [ ] Create documentation in `docs/providers/documentation/` +- [ ] Write unit tests +- [ ] Test with provider factory +- [ ] Handle errors gracefully +- [ ] Add logging statements +- [ ] Validate in Keep UI +- [ ] If supporting webhooks, implement `_format_alert()` static method +- [ ] If supporting OAuth 2.0, set OAUTH2_URL as class attribute +- [ ] Consider implementing `validate_scopes()` for scope validation +- [ ] Consider implementing `get_provider_metadata()` for provider versioning + +## Getting help + +- Review existing providers for examples +- Check the base provider classes for available methods +- Look at test files for testing patterns +- Ask in Keep's GitHub discussions or issues +- Review the [Provider Methods documentation](/providers/provider-methods) for advanced capabilities +- Understand [Linked vs Connected Providers](/providers/linked-providers) diff --git a/docs/providers/documentation/airflow-provider.mdx b/docs/providers/documentation/airflow-provider.mdx new file mode 100644 index 0000000000..2dd684109d --- /dev/null +++ b/docs/providers/documentation/airflow-provider.mdx @@ -0,0 +1,155 @@ +--- +title: "Airflow" +sidebarTitle: "Airflow Provider" +description: "The Airflow provider integration allows you to send alerts (e.g. DAG failures) from Airflow to Keep via webhooks." 
+--- +import AutoGeneratedSnippet from '/snippets/providers/airflow-snippet-autogenerated.mdx'; + +## Overview + +[Apache Airflow](https://airflow.apache.org/docs/apache-airflow/stable/index.html) is an open-source tool for programmatically authoring, scheduling, and monitoring data pipelines. Airflow's extensible Python framework enables you to build workflows that connect with virtually any technology. When working with Airflow, it's essential to monitor the health of your DAGs and tasks to ensure that your data pipelines run smoothly. The Airflow Provider integration allows seamless communication between Airflow and Keep, so you can forward alerts, such as task failures, directly to Keep via webhook configurations. + +![Apache Airflow](/images/airflow_1.png) + +## Connecting Airflow to Keep + +### Alert Integration via Webhook + +To connect Airflow to Keep, configure Airflow to send alerts using Keep's webhook. You must provide: + +- **Keep Webhook URL**: The webhook URL provided by Keep (for example, `https://api.keephq.dev/alerts/event/airflow`). +- **Keep API Key**: The API key generated on Keep's platform, which is used for authentication. + +A common method to integrate Airflow with Keep is by configuring alerts through [Airflow Callbacks](https://airflow.apache.org/docs/apache-airflow/stable/administration-and-deployment/logging-monitoring/callbacks.html). For instance, when an Airflow task fails, a callback can send an alert to Keep via the webhook. + +There are several steps to implement this: + +### Step 1: Define Keep's Alert Information + +Structure your alert payload with the following information: + +```python +data = { + "name": "Airflow Task Failure", + "description": "Task keep_task failed in DAG keep_dag", + "status": "firing", + "service": "pipeline", + "severity": "critical", +} +``` + +### Step 2: Configure Keep's Webhook Credentials + +To send alerts to Keep, configure the webhook URL and API key. 
Below is an example of how to send an alert using Python: + +> **Note**: You need to set up the `KEEP_API_KEY` environment variable with your Keep API key. + +```python +import os +import requests + +def send_alert_to_keep(dag_id, task_id, execution_date, error_message): + # Replace with your specific Keep webhook URL if different. + keep_webhook_url = "https://api.keephq.dev/alerts/event/airflow" + api_key = os.getenv("KEEP_API_KEY") + headers = { + "Content-Type": "application/json", + "Accept": "application/json", + "X-API-KEY": api_key, + } + + data = { + "name": f"Airflow Task Failure: {task_id}", + "message": f"Task {task_id} failed in DAG {dag_id} at {execution_date}", + "status": "firing", + "service": "pipeline", + "severity": "critical", + "description": str(error_message), + } + + response = requests.post(keep_webhook_url, headers=headers, json=data) + response.raise_for_status() +``` + +### Step 3: Configure the Airflow Callback Function + +Now, configure the callback so that an alert is sent to Keep when a task fails. You can attach this callback to one or more tasks in your DAG as shown below: + +```python +import os +import requests +from datetime import datetime +from datetime import timedelta + +from airflow import DAG +from airflow.operators.bash_operator import BashOperator + +default_args = { + 'owner': 'airflow', + 'depends_on_past': False, + 'email_on_failure': False, + 'email_on_retry': False, + 'retries': 1, + 'retry_delay': timedelta(minutes=5), +} + +def send_alert_to_keep(dag_id, task_id, execution_date, error_message): + # Replace with your specific Keep webhook URL if different. 
+ keep_webhook_url = "https://api.keephq.dev/alerts/event/airflow" + api_key = os.getenv("KEEP_API_KEY") + headers = { + "Content-Type": "application/json", + "Accept": "application/json", + "X-API-KEY": api_key, + } + + data = { + "name": f"Airflow Task Failure: {task_id}", + "message": f"Task {task_id} failed in DAG {dag_id} at {execution_date}", + "status": "firing", + "service": "pipeline", + "severity": "critical", + "description": str(error_message), + } + + response = requests.post(keep_webhook_url, headers=headers, json=data) + response.raise_for_status() + +def task_failure_callback(context): + send_alert_to_keep( + dag_id=context["dag"].dag_id, + task_id=context["task_instance"].task_id, + execution_date=context["execution_date"], + error_message=context.get("exception", "Unknown error"), + ) + +dag = DAG( + dag_id="keep_dag", + default_args=default_args, + description="A simple DAG with Keep integration", + schedule_interval=None, + start_date=datetime(2025, 1, 1), + catchup=False, +) + +task = BashOperator( + task_id="keep_task", + bash_command="exit 1", + dag=dag, + on_failure_callback=task_failure_callback, +) +``` + +### Step 4: Observe Alerts in Keep + +After setting up the above configuration, any failure in your Airflow tasks will trigger an alert that is sent to Keep via the configured webhook. You can then view, manage, and respond to these alerts using the Keep dashboard. 
+ +![Keep Alerts](/images/airflow_2.png) + + + +## Useful Links + +- [Airflow Documentation](https://airflow.apache.org/docs/apache-airflow/stable/index.html) +- [Airflow Callbacks](https://airflow.apache.org/docs/apache-airflow/stable/administration-and-deployment/logging-monitoring/callbacks.html) +- [Airflow Connection](https://airflow.apache.org/docs/apache-airflow/stable/howto/connection.html) diff --git a/docs/providers/documentation/aks-provider.mdx b/docs/providers/documentation/aks-provider.mdx index e92cae3449..88f76d86d3 100644 --- a/docs/providers/documentation/aks-provider.mdx +++ b/docs/providers/documentation/aks-provider.mdx @@ -2,25 +2,7 @@ title: "Azure AKS" description: "Azure AKS provider to view kubernetes resources." --- - -## Inputs - -- **command_type** (required): The command type to operate on the k8s cluster (`get_pods`, `get_pvc`, `get_node_pressure`). - -## Outputs - -Azure AKS Provider currently support the `query` function. - -## Authentication Parameters - -The Azure AKS Provider uses subscription_id, resource_name, resource_group_name, client_id, client_secret and tenant_id to allow you to query your cluster resources. You need to provide the following authentication parameters to connect: - -- **subscription_id** (required): The subscription id of your azure account. -- **client_id** (required): The client id from your rbac config generated in azure. -- **client_secret** (required): The client secret from your rbac config generated in azure. -- **tenant_id** (required): The tenant id from your rbac config generated in azure. -- **resource_group_name** (required): The resource group name where your aks is created. -- **resource_name** (required): The cluster name of your aks. 
+import AutoGeneratedSnippet from '/snippets/providers/aks-snippet-autogenerated.mdx'; ## Connecting with the Provider @@ -43,6 +25,8 @@ To connect to Azure AKS, follow below steps: - This provider allows you to interact with Azure AKS to query resources in kubernetes cluster. + + ## Useful Links - [Azure AKS List Cluster User Creds](https://learn.microsoft.com/en-us/rest/api/aks/managed-clusters/list-cluster-user-credentials?view=rest-aks-2023-08-01&tabs=HTTP) diff --git a/docs/providers/documentation/amazonsqs-provider.mdx b/docs/providers/documentation/amazonsqs-provider.mdx new file mode 100644 index 0000000000..967c3abfdb --- /dev/null +++ b/docs/providers/documentation/amazonsqs-provider.mdx @@ -0,0 +1,67 @@ +--- +title: "AmazonSQS Provider" +sidebarTitle: "AmazonSQS Provider" +description: "The AmazonSQS provider enables you to pull & push alerts to the Amazon SQS Queue." +--- +import AutoGeneratedSnippet from '/snippets/providers/amazonsqs-snippet-autogenerated.mdx'; + +## Overview + +The **AmazonSQS Provider** facilitates +Consuming SQS messages as alerts +Notifying/Pushing messages to SQS Queue + + + +## Inputs for AmazonSQS Action + +- `message`: str: Body/Message for the notification +- `group_id`: str | None: Mandatory only if Queue is of type FIFO, ignored in case of a normal Queue. +- `dedup_id`: str | None: Mandatory only if Queue is of type FIFO, ignored in case of a normal Queue. +- **kwargs: dict | None: You can pass additional key-value pairs, that will be sent as MessageAttributes in the notification. + +## Output for AmazonSQS Action +For more detail, visit [sqs-documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sqs/client/send_message.html#). 
+ ```json + { + 'MD5OfMessageBody': 'string', + 'MD5OfMessageAttributes': 'string', + 'MD5OfMessageSystemAttributes': 'string', + 'MessageId': 'string', + 'SequenceNumber': 'string' + } + ``` + + - When using the AmazonSQS action, if your queue is FIFO, then it is **mandatory** to pass a dedup_id & group_id. + - All the extra fields present in the MessageAttribute are stored in alert.label as a key-value pair dictionary. + - You can pass these attributes in the SQS Queue message and Keep will extract and use these fields for the alert + - name + - status: Possible values 'firing' | 'resolved' | 'acknowledged' | 'suppressed' | 'pending' defaults to 'firing'. + - severity: Possible values 'critical' | 'high' | 'warning' | 'info' | 'low' defaults to 'high' + - description + + + +Permissions needed for the key-id pair are: +1. AmazonSQSFullAccess: If you want to notify + receive, this is sqs::read + sqs::write scope. +2. AmazonSQSReadOnlyAccess: If you want to just receive, this is the sqs::read scope. + +You can find these under: +IAM > Users > [YOUR_USER] > Permission > Add Permissions > Add Permissions > Attach policies directly > Search for SQS. + +To create a key-id pair, follow these steps: +1. Search IAM in AWS console, press enter. +2. Go to users +3. Select the user that you want to create the access key for +4. Click on `Create access key` +5. Select `Third party service`, Click `Next` +6. Add `Description Tag` click `Next` +7. Copy/Download the key-id pair. 
+ + +## Useful Links + +- [AmazonSQS Boto3 Examples](https://docs.aws.amazon.com/code-library/latest/ug/python_3_sqs_code_examples.html) +- [Boto3 SQS Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sqs.html) diff --git a/docs/providers/documentation/anthropic-provider.mdx b/docs/providers/documentation/anthropic-provider.mdx new file mode 100644 index 0000000000..3a6223889a --- /dev/null +++ b/docs/providers/documentation/anthropic-provider.mdx @@ -0,0 +1,26 @@ +--- +title: "Anthropic Provider" +description: "The Anthropic Provider allows for integrating Anthropic's Claude language models into Keep." +--- +import AutoGeneratedSnippet from '/snippets/providers/anthropic-snippet-autogenerated.mdx'; + + + The Anthropic Provider supports querying Claude language models for prompt-based + interactions. + + +## Outputs + +Currently, the Claude Provider outputs the response from the model based on the prompt provided. + +## Connecting with the Provider + +To connect to Claude, you'll need to obtain an API Key: + +1. Log in to your Anthropic account at [Anthropic Console](https://console.anthropic.com). +2. Navigate to the **API Keys** section. +3. Click on **Create Key** to generate a new API key for Keep. + +Use the generated API key in the `authentication` section of your Claude Provider configuration. 
+ + diff --git a/docs/providers/documentation/appdynamics-provider.mdx b/docs/providers/documentation/appdynamics-provider.mdx index eef0e7a762..8e265679d3 100644 --- a/docs/providers/documentation/appdynamics-provider.mdx +++ b/docs/providers/documentation/appdynamics-provider.mdx @@ -3,23 +3,105 @@ title: "AppDynamics" sidebarTitle: "AppDynamics Provider" description: "AppDynamics provider allows you to get AppDynamics `alerts/actions` via webhook installation" --- +import AutoGeneratedSnippet from '/snippets/providers/appdynamics-snippet-autogenerated.mdx'; -## Authentication Parameters -The AppDynamics provider requires the following authentication parameter: - -- `AppDynamics Username`: Required. This is your AppDynamics account username. -- `AppDynamics Password`: This is the password associated with your AppDynamics Username. -- `AppDynamics Account Name`: This is your account's name. -- `App Id`: The Id of the Application in which you would like to install the webhook. -- `Host`: This is the hostname of the AppDynamics instance you wish to connect to. It identifies the AppDynamics server that the API will interact with. + ## Connecting with the Provider +1. Ensure you have a AppDynamics account with the necessary [permissions](https://docs.appdynamics.com/accounts/en/cisco-appdynamics-on-premises-user-management/roles-and-permissions). The basic permissions required are `Account Owner` or `Administrator`. Alternatively you can create an account [instructions](https://docs.appdynamics.com/accounts/en/global-account-administration/access-management/manage-user-accounts) + +## Provider configuration -Obtain AppDynamics Username and Password: -1. Ensure you have a AppDynamics account with the necessary [permissions](https://docs.appdynamics.com/accounts/en/cisco-appdynamics-on-premises-user-management/roles-and-permissions). The basic permissions required are `Account Owner` or `Administrator`. 
Alternatively you can create an account (instructions)[https://docs.appdynamics.com/accounts/en/global-account-administration/access-management/manage-user-accounts] -2. Find your account name [here](https://accounts.appdynamics.com/overview). +1. Find your account name [here](https://accounts.appdynamics.com/overview). +2. Get the appId of the Appdynamics instance in which you wish to install the webhook into. 3. Determine the Host [here](https://accounts.appdynamics.com/overview). -4. Get the appId of the Appdynamics instance in which you wish to install the webhook into. + +### Basic Auth authentication + +1. Obtain AppDynamics **Username** and **Password** +2. Go to **Basic Auth** tab under **Authentication** section +3. Enter **Username** and **Password** + + + Keep add AppDynamics Username and Password + + +### Access Token authentication + +1. Log in to the **Controller UI** as an **Account Owner** or other roles with the **Administer users**, **groups**, **roles** permission. +2. Go to **Administration** + + + AppDynamics Administration + + +3. Go to **API Client** tab + + + AppDynamics API Client tab + + +4. Click **+ Create** + + + Create new AppDynamics API Client + + +5. Fill Client **Name** and **Description** +6. Click **Generate Secret** + + + AppDynamics generate API Client Secret + + + + This API Client secret is not an authentication token yet + + +7. Add **Account Owner** and/or **Administrator** roles + + + AppDynamics add API Client roles + + +8. Click **Save** + + + AppDynamics save API Client + + +9. Click **Generate Temporary Token** + + + AppDynamics Generate API Client Temporary Access Token + + + + This token is not persistent, but since Keep uses it just once to install Webhook, we will use it without OAuth + + +10. Click **Save** once again + + This is important. Otherwise the generated token will not be saved and authentication will fail + +11. Copy the generated token + + + AppDynamics copy API Client Temporary Access Token + + +12. 
Go to **Access Token** tab under **Authentication** section + + + Keep add AppDynamics Access Token + + +13. Enter Access Token + +## Connecting provider + +1. Ensure **Install webhook** is checked +2. Click **Connect** ## Webhook Integration Modifications diff --git a/docs/providers/documentation/argocd-provider.mdx b/docs/providers/documentation/argocd-provider.mdx new file mode 100644 index 0000000000..23a514aaaa --- /dev/null +++ b/docs/providers/documentation/argocd-provider.mdx @@ -0,0 +1,32 @@ +--- +title: "ArgoCD Provider" +sidebarTitle: "ArgoCD Provider" +description: "The ArgoCD provider enables you to pull topology and Application data." +--- +import AutoGeneratedSnippet from '/snippets/providers/argocd-snippet-autogenerated.mdx'; + +## Overview + +The **ArgoCD Provider** facilitates pulling Topology and Application data from ArgoCD. +ArgoCD Applications are mapped to Keep Services +ArgoCD ApplicationSets are mapped to Keep Applcations + + + +## Connecting with the Provider + +1. Obtain the **access token** from your ArgoCD instance by following `Generate auth token` from [ArgoCD's User management docs](https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#manage-users). +2. Set the **deployment URL** to your ArgoCD instance's base URL (e.g., `https://localhost:8080`). + +## Features + +The **ArgoCD Provider** supports the following key features: + +- **Topology**: Configures the Topology usin the applications from ArgoCD. +- **Applications**: Creates Applications using the ApplicationSets from ArgoCD. 
+ + +## Useful Links + +- [ArgoCD API Documentation](https://argo-cd.readthedocs.io/en/stable/developer-guide/api-docs) +- [ArgoCD User Management](https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#local-usersaccounts) diff --git a/docs/providers/documentation/asana-provider.mdx b/docs/providers/documentation/asana-provider.mdx new file mode 100644 index 0000000000..c495564cab --- /dev/null +++ b/docs/providers/documentation/asana-provider.mdx @@ -0,0 +1,34 @@ +--- +title: "Asana" +sidebarTitle: "Asana Provider" +description: "Asana Provider allows you to create and update tasks in Asana" +--- +import AutoGeneratedSnippet from '/snippets/providers/asana-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +1. Go to [Asana](https://app.asana.com/0/developer-console) + + + + + +2. Click on `Create New Personal Access Token`. + + + + + +3. Give it a name and click on `Create`. + +4. Copy the generated token. This will be used as the `Personal Access Token` in the provider settings. + + + + + +## Useful Links + +- [Asana](https://asana.com) diff --git a/docs/providers/documentation/auth0-provider.mdx b/docs/providers/documentation/auth0-provider.mdx new file mode 100644 index 0000000000..1f7611fbf1 --- /dev/null +++ b/docs/providers/documentation/auth0-provider.mdx @@ -0,0 +1,18 @@ +--- +title: "Auth0" +sidebarTitle: "Auth0 Provider" +description: "Auth0 provider allows interaction with Auth0 APIs for authentication and user management." +--- +import AutoGeneratedSnippet from '/snippets/providers/auth0-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +The Auth0 provider connects to both the **Authentication API** and the **Management API**, enabling functionality such as token-based authentication and user management. Depending on your needs, you can: +- Use the **Authentication API** to obtain access tokens, manage user profiles, or handle multi-factor authentication. 
+- Use the **Management API** to automate the configuration of your Auth0 environment, register applications, manage users, and more. + +## Useful Links +-[Auth0 API Documentation](https://auth0.com/docs/api) +-[Auth0 as an authentication method for keep](https://docs.keephq.dev/deployment/authentication/auth0-auth) diff --git a/docs/providers/documentation/axiom-provider.mdx b/docs/providers/documentation/axiom-provider.mdx index ffc5ebf420..13a8d5a85d 100644 --- a/docs/providers/documentation/axiom-provider.mdx +++ b/docs/providers/documentation/axiom-provider.mdx @@ -2,26 +2,9 @@ title: "Axiom Provider" description: "Axiom Provider is a class that allows to ingest/digest data from Axiom." --- +import AutoGeneratedSnippet from '/snippets/providers/axiom-snippet-autogenerated.mdx'; -## Inputs - -- **query** (required): AQL to execute -- **dataset** (required): Dataset to query -- **organization_id** (optional): Override the given organization id from configuration -- **nocache** (optional): Whether to cache the response or not -- **startTime** (optional): Start time, defaults to UTC now in ISO format. -- **endTime** (optional): End time, defaults to UTC now in ISO format. - -## Outputs - -Axiom does not currently support the `notify` function. - -## Authentication Parameters - -The Axiom Provider uses API token authentication. You need to provide the following authentication parameters to connect to Axiom: - -- **api_token** (required): Your Axiom API token. -- **organization_id** (optional): The organization ID to access datasets in. + ## Connecting with the Provider @@ -41,6 +24,76 @@ To access datasets, you need to provide the organization ID. You can find your o - The `startTime` and `endTime` parameters use ISO-8601 format. - The `query` function returns the response in JSON format from the Axiom API. +## Webhook Integration + +1. In Axiom, go to the `Monitors` tab in the Axiom dashboad. + + + + + +2. 
Click on `Notifiers` in the left sidebar and create a new notifier. + + + + + +3. Give it a name and select `Custom Webhook` as kind of notifier. Enter the webhook url as [https://api.keephq.dev/alerts/event/axiom](https://api.keephq.dev/alerts/event/axiom). + + + + + +4. Follow the below steps to create a new API key in Keep. + +5. Go to Keep dashboard and click on the profile icon in the botton left corner and click `Settings`. + + + + + +6. Select `Users and Access` tab and then select `API Keys` tab and create a new API key. + + + + + +7. Give name and select the role as `webhook` and click on `Create API Key`. + + + + + +8. Copy the API key. + + + + + +9. Add a new header with key as `X-API-KEY` and create a new API key in Keep and paste it as the value and save the webhook. + + + + + +10. Go to `Monitors` tab and click on the `Monitors` in the left sidebar and create a new monitor. + + + + + +11. Create a new monitor and select the notifier created in the previous step as per your requirement. Refer [Axiom Monitors](https://axiom.co/docs/monitor-data/monitors) to create a new monitor. + + + + + + + + + +12. Save the monitor. Now, you will receive the alerts in Keep. + ## Useful Links - [Axiom API Documentation](https://axiom.co/docs/restapi/introduction) diff --git a/docs/providers/documentation/azuremonitoring-provider.mdx b/docs/providers/documentation/azuremonitoring-provider.mdx index f47a54be88..0d21cbe976 100644 --- a/docs/providers/documentation/azuremonitoring-provider.mdx +++ b/docs/providers/documentation/azuremonitoring-provider.mdx @@ -1,16 +1,17 @@ --- -title: "Azure Monitoring" -sidebarTitle: "Azure Monitoring Provider" -description: "Azure Monitoring provider allows you to get alerts from Azure Monitoring via webhooks." +title: "Azure Monitor" +sidebarTitle: "Azure Monitor Provider" +description: "Azure Monitorg provider allows you to get alerts from Azure Monitor via webhooks." 
--- +import AutoGeneratedSnippet from '/snippets/providers/azuremonitoring-snippet-autogenerated.mdx'; ## Overview -The Azure Monitoring Provider integrates Keep with Azure Monitoring, allowing you to receive alerts within Keep's platform. By setting up a webhook in Azure, you can ensure that critical alerts are sent to Keep, allowing for efficient monitoring and response. +The Azure Monitor Provider integrates Keep with Azure Monitor, allowing you to receive alerts within Keep's platform. By setting up a webhook in Azure, you can ensure that critical alerts are sent to Keep, allowing for efficient monitoring and response. -## Connecting Azure Monitoring to Keep +## Connecting Azure Monitor to Keep -Connecting Azure Monitoring to Keep involves creating an Action Group in Azure, adding a webhook action, and configuring the Alert Rule to use the new Action Group. +Connecting Azure Monitor to Keep involves creating an Action Group in Azure, adding a webhook action, and configuring the Alert Rule to use the new Action Group. ### Step 1: Navigate an Action Group 1. Log in to your Azure portal. @@ -72,6 +73,8 @@ Connecting Azure Monitoring to Keep involves creating an Action Group in Azure, + + ## Useful Links - [Azure Monitor alert webhook](https://learn.microsoft.com/en-us/azure/azure-monitor/alerts/alerts-webhooks) - [Azure Monitor alert payload](https://learn.microsoft.com/en-us/azure/azure-monitor/alerts/alerts-payload-samples) diff --git a/docs/providers/documentation/bash-provider.mdx b/docs/providers/documentation/bash-provider.mdx new file mode 100644 index 0000000000..c52e055258 --- /dev/null +++ b/docs/providers/documentation/bash-provider.mdx @@ -0,0 +1,19 @@ +--- +title: "Bash" +sidebarTitle: "Bash Provider" +description: "Bash provider allows executing Bash commands in a workflow, with a limitation for cloud execution." 
+--- +import AutoGeneratedSnippet from '/snippets/providers/bash-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +The Bash provider allows you to run Bash commands or scripts in your workflow. You can pass in any valid Bash command, and it will be executed in a local environment. + +### **Cloud Limitation** +This provider is disabled for cloud environments and can only be used in local or self-hosted environments. + +## Usefull Links +-[Bash Documentation](https://www.gnu.org/savannah-checkouts/gnu/bash/manual/bash.html) + diff --git a/docs/providers/documentation/bigquery-provider.mdx b/docs/providers/documentation/bigquery-provider.mdx new file mode 100644 index 0000000000..5db2b481c8 --- /dev/null +++ b/docs/providers/documentation/bigquery-provider.mdx @@ -0,0 +1,15 @@ +--- +title: "BigQuery" +sidebarTitle: "BigQuery Provider" +description: "BigQuery provider allows interaction with Google BigQuery for querying and managing datasets." +--- +import AutoGeneratedSnippet from '/snippets/providers/bigquery-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +1. Create a Google Cloud project and enable the BigQuery API. +2. Create a service account in your Google Cloud project and download the JSON key file. +3. Share the necessary datasets with the service account. +4. Configure your provider using the `service_account_key`, `project_id`, and `dataset`. diff --git a/docs/providers/documentation/centreon-provider.mdx b/docs/providers/documentation/centreon-provider.mdx index 90801fb464..c3bbefc068 100644 --- a/docs/providers/documentation/centreon-provider.mdx +++ b/docs/providers/documentation/centreon-provider.mdx @@ -3,13 +3,9 @@ title: "Centreon" sidebarTitle: "Centreon Provider" description: "Centreon allows you to monitor your infrastructure with ease." 
--- +import AutoGeneratedSnippet from '/snippets/providers/centreon-snippet-autogenerated.mdx'; -## Authentication Parameters - -The Centreon provider requires the following authentication parameters: - -- `Centreon Host URL`: The URL of the Centreon instance. Example: `https://centreon.example.com`. -- `Centreon API Token`: The API token of an admin user. + ## Connecting with the Provider diff --git a/docs/providers/documentation/checkly-provider.mdx b/docs/providers/documentation/checkly-provider.mdx new file mode 100644 index 0000000000..a2438eb1a2 --- /dev/null +++ b/docs/providers/documentation/checkly-provider.mdx @@ -0,0 +1,120 @@ +--- +title: 'Checkly' +sidebarTitle: 'Checkly Provider' +description: 'Checkly allows you to receive alerts from Checkly using API endpoints as well as webhooks' +--- +import AutoGeneratedSnippet from '/snippets/providers/checkly-snippet-autogenerated.mdx'; + + + +## Connecting Checkly to Keep + +1. Open Checkly dashboard and click on your profile picture in the top right corner. + +2. Click on `User Settings`. + + + + + +3. Open the `API Keys` tab and click on `Create API Key` to generate a new API key. + + + + + +4. Copy the API key. + +5. Open `General` tab under Account Settings and copy the `Account ID`. + + + + + +6. Go to Keep, add Checkly as a provider and enter the API key and Account ID in the respective fields and click on `Connect`. + +## Webhooks Integration + +1. Open Checkly dashboard and open `Alerts` tab in the left sidebar. + + + + + +2. Click on `Add more channels` + + + + + +3. Select `Webhook` from the list of available channels. + + + + + +4. Enter a name for the webhook, select the method as `POST` + +5. Enter [https://api.keephq.dev/alerts/event/checkly](https://api.keephq.dev/alerts/event/checkly) as the URL. + +6. Copy the below snippet and paste in the `Body` of Webhook. Refer the screenshot below for reference. 
+ +```json +{ + "event": "{{ALERT_TITLE}}", + "alert_type": "{{ALERT_TYPE}}", + "check_name": "{{CHECK_NAME}}", + "group_name": "{{GROUP_NAME}}", + "check_id": "{{CHECK_ID}}", + "check_type": "{{CHECK_TYPE}}", + "check_result_id": "{{CHECK_RESULT_ID}}", + "check_error_message": "{{CHECK_ERROR_MESSAGE}}", + "response_time": "{{RESPONSE_TIME}}", + "api_check_response_status_code": "{{API_CHECK_RESPONSE_STATUS_CODE}}", + "api_check_response_status_text": "{{API_CHECK_RESPONSE_STATUS_TEXT}}", + "run_location": "{{RUN_LOCATION}}", + "ssl_days_remaining": "{{SSL_DAYS_REMAINING}}", + "ssl_check_domain": "{{SSL_CHECK_DOMAIN}}", + "started_at": "{{STARTED_AT}}", + "tags": "{{TAGS}}", + "link": "{{RESULT_LINK}}", + "region": "{{REGION}}", + "uuid": "{{$UUID}}" +} +``` + + + + + +7. Go to Headers tab and add a new header with key as `X-API-KEY` and create a new API key in Keep and paste it as the value and save the webhook. + + + + + +8. Follow the below steps to create a new API key in Keep. + +9. Go to Keep dashboard and click on the profile icon in the botton left corner and click `Settings`. + + + + + +10. Select `Users and Access` tab and then select `API Keys` tab and create a new API key. + + + + + +11. Give name and select the role as `webhook` and click on `Create API Key`. + + + + + +12. Use the generated API key in the `X-API-KEY` header of the webhook created in Checkly. + +## Useful Links + +- [Checkly Website](https://www.checklyhq.com/) diff --git a/docs/providers/documentation/checkmk-provider.mdx b/docs/providers/documentation/checkmk-provider.mdx new file mode 100644 index 0000000000..07a426b551 --- /dev/null +++ b/docs/providers/documentation/checkmk-provider.mdx @@ -0,0 +1,92 @@ +--- +title: 'Checkmk' +sidebarTitle: 'Checkmk Provider' +description: 'Checkmk provider allows you to get alerts from Checkmk via webhooks.' 
+--- +import AutoGeneratedSnippet from '/snippets/providers/checkmk-snippet-autogenerated.mdx'; + +## Overview + +The Checkmk provider enables seamless integration between Keep and Checkmk. It allows you to get alerts from Checkmk to Keep via webhooks making it easier to manage your infrastructure and applications in one place. + +## Connecting Checkmk to Keep + +To connect Checkmk to Keep, you need to configure it as a webhook from Checkmk. Follow the steps below to set up the integration: + +1. Keep webhook script need to installed on the Checkmk server. + +2. You can download the Keep webhook script using the following command: + +```bash +wget -O webhook-keep.py https://github.com/keephq/keep/blob/main/keep/providers/checkmk_provider/webhook-keep.py?raw=true +``` + +3. Copy the downloaded script to the following path on the Checkmk server: + +If you are using Checkmk Docker container, then copy it to the following path according to your docker volume mapping: + +```bash +cp webhook-keep.py /omd/sites//local/share/check_mk/notifications/webhook-keep.py +cd /omd/sites//local/share/check_mk/notifications +``` + +If you are using Checkmk installed on the server, then copy it to the following path: + +```bash +cp webhook-keep.py ~/local/share/check_mk/notifications/webhook-keep.py +cd ~/local/share/check_mk/notifications +``` + +4. Make the script executable: + +```bash +chmod +x webhook-keep.py +``` + +5. Now go to the Checkmk web interface and navigate to Setup + + + + + +6. Click on Notifications under Events + + + + + +6. Click on Add rule + + + + + +7. In the Notifications method method, select "webhook-keep" as the notification method. + + + + + +8. Configure the Rule properties, Contact selections, and Conditions according to your requirements. + +9. The first parameter is the Webhook URL of Keep which is `https://api.keephq.dev/alerts/event/checkmk`. + +10. 
The second parameter is the API Key of Keep which you can generate in the [Keep settings](https://platform.keephq.dev/settings?selectedTab=users&userSubTab=api-keys). + +11. Click on Save to save the configuration. + +12. Now you will start receiving alerts from Checkmk to Keep via webhooks when the configured conditions are met. + +## Useful Links + +- [Checkmk](https://checkmk.com/) + + diff --git a/docs/providers/documentation/cilium-provider.mdx b/docs/providers/documentation/cilium-provider.mdx new file mode 100644 index 0000000000..2ae0c5fdd9 --- /dev/null +++ b/docs/providers/documentation/cilium-provider.mdx @@ -0,0 +1,213 @@ +--- +title: "Cilium" +sidebarTitle: "Cilium Provider" +description: "Cilium provider enables topology discovery by analyzing network flows between services in your Kubernetes cluster using Hubble." +--- +import AutoGeneratedSnippet from '/snippets/providers/cilium-snippet-autogenerated.mdx'; + + + +## Overview + + + +Cilium provider is in Beta and is not working with authentication yet. + +The current way to pull topology data from your kubernetes cluster, is to run: +```bash +# hubble-relay usually installed at kube-system, but it depends on your cluster. +kubectl port-forward -n kube-system svc/hubble-relay 4245:80 +``` + +and then use `localhost:4245` to pull topology data. + +If you need help with connecting Cilium provider, [reach out](https://slack.keephq.dev). + + + +The Cilium provider leverages Hubble's network flow data to automatically discover service dependencies and build a topology map of your Kubernetes applications. 
+ + + + + + + +## Authentication Parameters + +| Parameter | Description | Example | +|-----------|-------------|----------| +| `cilium_base_endpoint` | The base endpoint of the Cilium Hubble relay | `localhost:4245` | + +## Outputs + +The provider returns topology information including: +- Service names and their dependencies +- Namespace information +- Pod labels and cluster metadata +- Network-based relationships between services + +## Service Discovery Logic + +The provider identifies services using the following hierarchy: +1. Workload name (if available) +2. Kubernetes labels (`k8s:app=` or `k8s:app.kubernetes.io/name=`) +3. Pod name (stripped of deployment suffixes) + +## Requirements + +- A running Kubernetes cluster with Cilium installed +- Hubble enabled and accessible via gRPC +- Network visibility (flow logs) enabled in Cilium + +## Limitations + +- Only captures active network flows between pods +- Service discovery is limited to pods with proper Kubernetes labels +- Requires direct access to the Hubble relay endpoint + +## Useful Links + +- [Cilium Documentation](https://docs.cilium.io/) +- [Hubble Documentation](https://docs.cilium.io/en/stable/hubble/) +- [Kubernetes Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) + +## Google Kubernetes Engine specific + +If you are using a GKE cluster, you cannot connect Keep to the Google-managed hubble-relay directly because: +- hubble-relay operates only in secure mode, +- hubble-relay requires client certificate authentication. + +However, Keep does not currently support these features. + +To work around this, you can add an NGINX Pod that listens on a plaintext HTTP port and proxies requests to hubble-relay secure port using hubble-relay certificates. + + + +You need a GKE cluster with [dataplane v2](https://cloud.google.com/kubernetes-engine/docs/concepts/dataplane-v2) . 
+ +[Dataplane v2 observability](https://cloud.google.com/kubernetes-engine/docs/how-to/configure-dpv2-observability) must be enabled. + + + +Here is an example of running a plaintext NGINX proxy: + +```yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: hubble-relay-insecure-nginx + namespace: gke-managed-dpv2-observability +data: + nginx.conf: | + user nginx; + worker_processes auto; + + error_log /dev/stdout notice; + pid /var/run/nginx.pid; + + events { + worker_connections 1024; + } + + http { + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /dev/stdout main; + + server { + listen 80; + + http2 on; + + location / { + grpc_pass grpcs://hubble-relay.gke-managed-dpv2-observability.svc.cluster.local:443; + + grpc_ssl_certificate /etc/nginx/certs/client.crt; + grpc_ssl_certificate_key /etc/nginx/certs/client.key; + grpc_ssl_trusted_certificate /etc/nginx/certs/hubble-relay-ca.crt; + } + } + } +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: hubble-relay-insecure + namespace: gke-managed-dpv2-observability + labels: + k8s-app: hubble-relay-insecure + app.kubernetes.io/name: hubble-relay-insecure + app.kubernetes.io/part-of: cilium +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-relay-insecure + template: + metadata: + labels: + k8s-app: hubble-relay-insecure + app.kubernetes.io/name: hubble-relay-insecure + app.kubernetes.io/part-of: cilium + spec: + securityContext: + fsGroup: 1000 + seccompProfile: + type: RuntimeDefault + containers: + - name: frontend + image: nginx:alpine + ports: + - name: http + containerPort: 80 + volumeMounts: + - name: hubble-relay-insecure-nginx-conf + mountPath: /etc/nginx/ + readOnly: true + - name: hubble-relay-client-certs + mountPath: /etc/nginx/certs/ + readOnly: true + volumes: + - configMap: + name: hubble-relay-insecure-nginx + name: 
hubble-relay-insecure-nginx-conf + - name: hubble-relay-client-certs + projected: + defaultMode: 0400 + sources: + - secret: + name: hubble-relay-client-certs + items: + - key: ca.crt + path: hubble-relay-ca.crt + - key: tls.crt + path: client.crt + - key: tls.key + path: client.key +--- +kind: Service +apiVersion: v1 +metadata: + name: hubble-relay-insecure + namespace: gke-managed-dpv2-observability + labels: + k8s-app: hubble-relay-insecure + app.kubernetes.io/name: hubble-relay-insecure + app.kubernetes.io/part-of: cilium +spec: + type: ClusterIP + selector: + k8s-app: hubble-relay-insecure + ports: + - name: http + port: 80 + targetPort: 80 +``` + +Now you can connect Keep with google-managed hubble-relay by adding Cilium provider using `hubble-relay-insecure.gke-managed-dpv2-observability:80` address. diff --git a/docs/providers/documentation/clickhouse-provider.mdx b/docs/providers/documentation/clickhouse-provider.mdx new file mode 100644 index 0000000000..8739c33b0b --- /dev/null +++ b/docs/providers/documentation/clickhouse-provider.mdx @@ -0,0 +1,22 @@ +--- +title: 'ClickHouse' +sidebarTitle: 'ClickHouse Provider' +description: 'ClickHouse provider allows you to interact with ClickHouse database.' +--- +import AutoGeneratedSnippet from '/snippets/providers/clickhouse-snippet-autogenerated.mdx'; + +## Overview + +ClickHouse is an open-source column-oriented DBMS for online analytical processing that allows users to generate analytical reports using SQL queries in real-time. + + + +## Connecting with the ClickHouse provider + +1. Obtain the required authentication parameters. +2. Add ClickHouse provider to your keep account and configure with the above authentication parameters. 
+ +## Useful Links + +- [ClickHouse](https://clickhouse.com/) +- [ClickHouse Statements](https://clickhouse.com/docs/en/sql-reference/statements/) diff --git a/docs/providers/documentation/cloudwatch-provider.mdx b/docs/providers/documentation/cloudwatch-provider.mdx index 6d2506f4fe..ad2174829b 100644 --- a/docs/providers/documentation/cloudwatch-provider.mdx +++ b/docs/providers/documentation/cloudwatch-provider.mdx @@ -3,6 +3,7 @@ title: "CloudWatch" sidebarTitle: "CloudWatch Provider" description: "CloudWatch provider enables seamless integration with AWS CloudWatch for alerting and monitoring, directly pushing alarms into Keep." --- +import AutoGeneratedSnippet from '/snippets/providers/cloudwatch-snippet-autogenerated.mdx'; ## Overview @@ -23,72 +24,12 @@ To integrate CloudWatch with Keep, you'll need the following: - A configured Keep account with API access. - Appropriate AWS IAM permissions for the CloudWatch provider. -## Required AWS IAM Permissions (Scopes) - -To ensure the CloudWatch provider operates seamlessly, certain AWS IAM permissions (referred to as "scopes") are necessary. These scopes enable the provider to perform actions such as reading alarm details, updating alarm configurations, and subscribing to SNS topics. Below is a list of the required scopes along with explanations: - -### Mandatory Scopes - -- **`cloudwatch:DescribeAlarms`** - - **Description**: Necessary to retrieve information about CloudWatch alarms. - - **Documentation**: [API_DescribeAlarms](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html) - - **Alias**: Describe Alarms - - **Mandatory**: Yes - - This scope is crucial for the provider to fetch and list all CloudWatch alarms. - -### Optional Scopes - -- **`cloudwatch:PutMetricAlarm`** - - **Description**: Required to update alarm configurations, particularly to add Keep as an SNS action on alarms. 
- - **Documentation**: [API_PutMetricAlarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricAlarm.html) - - **Alias**: Update Alarms - - This scope allows the modification of existing CloudWatch alarms to integrate with Keep notifications. - -- **`sns:ListSubscriptionsByTopic`** - - **Description**: Allows listing all subscriptions for a given SNS topic, enabling Keep to subscribe itself. - - **Documentation**: [SNS Access Policy](https://docs.aws.amazon.com/sns/latest/dg/sns-access-policy-language-api-permissions-reference.html) - - **Alias**: List Subscriptions - - Essential for the provider to manage subscriptions to SNS topics for alarm notifications. - -- **`logs:GetQueryResults`** - - **Description**: Required for retrieving the results of CloudWatch Logs Insights queries. - - **Documentation**: [API_GetQueryResults](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) - - **Alias**: Read Query Results - - Enables the provider to fetch query results from CloudWatch Logs Insights. - -- **`logs:DescribeQueries`** - - **Description**: Necessary to describe the results of CloudWatch Logs Insights queries. - - **Documentation**: [API_DescribeQueries](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueries.html) - - **Alias**: Describe Query Results - - This scope is used to access detailed information about queries executed in CloudWatch Logs Insights. - -- **`logs:StartQuery`** - - **Description**: Allows starting CloudWatch Logs Insights queries. - - **Documentation**: [API_StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) - - **Alias**: Start Logs Query - - Critical for initiating logs analysis and queries within CloudWatch Logs Insights. - -- **`iam:SimulatePrincipalPolicy`** - - **Description**: Permits Keep to test the scopes of the current IAM role without making any resource modifications. 
- - **Documentation**: [API_SimulatePrincipalPolicy](https://docs.aws.amazon.com/IAM/latest/APIReference/API_SimulatePrincipalPolicy.html) - - **Alias**: Simulate IAM Policy - - This scope is useful for verifying the permissions associated with the IAM role used by Keep, ensuring it has the necessary access without altering any AWS resources. - -While some scopes are optional, having them configured can enhance the integration capabilities and provide a more comprehensive monitoring solution within Keep. - -### Authentication Configuration - -Connecting CloudWatch to Keep requires: - -- **AWS Access Key & Secret**: Your AWS credentials with access to CloudWatch and SNS. -- **Region**: The AWS region your CloudWatch alarms and SNS topics reside in. -- **Session Token** (optional): Necessary for temporary AWS credentials. -- **CloudWatch SNS Topic** (optional): An ARN or name of the SNS topic for sending notifications. Optional if your alarms are already configured with an SNS topic. - ## Setting Up the Integration For a seamless setup process, ensure your AWS IAM roles are properly configured with the necessary permissions for CloudWatch and SNS access. + + ### Steps: 1. **Configure AWS IAM Roles**: Ensure the IAM role used by the CloudWatch provider has permissions for `cloudwatch:DescribeAlarms`, `cloudwatch:PutMetricAlarm`, `sns:ListSubscriptionsByTopic`, and other relevant actions. diff --git a/docs/providers/documentation/console-provider.mdx b/docs/providers/documentation/console-provider.mdx index a9524241e8..0ff7eba266 100644 --- a/docs/providers/documentation/console-provider.mdx +++ b/docs/providers/documentation/console-provider.mdx @@ -3,26 +3,34 @@ title: "Console" sidebarTitle: "Console Provider" description: "Console provider is sort of a mock provider that projects given alert message to the console." 
--- +import AutoGeneratedSnippet from '/snippets/providers/console-snippet-autogenerated.mdx'; ## Inputs -- alert_message: The alert message to print to the console + +- message: The alert message to print to the console ## Outputs + This provider has no outputs ## Authentication Parameters + This provider has no authentication ## Connecting with the Provider + This provider doesn't require any connection ## Notes -*No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page* + +_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ ## Useful Links -*No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page* + +_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ ## Example + ```python config = { "description": "Console Output Provider", @@ -32,10 +40,12 @@ provider = ProvidersFactory.get_provider( provider_id='mock', provider_type="console", provider_config=config ) provider.notify( - alert_message="Simple alert showing context with name: {name}".format( + message="Simple alert showing context with name: {name}".format( name="John Doe" ) ) ``` ![](/images/console_provider_example.png) + + diff --git a/docs/providers/documentation/coralogix-provider.mdx b/docs/providers/documentation/coralogix-provider.mdx new file mode 100644 index 0000000000..58cad4fe64 --- /dev/null +++ b/docs/providers/documentation/coralogix-provider.mdx @@ -0,0 +1,73 @@ +--- +title: 'Coralogix' +sidebarTitle: 'Coralogix Provider' +description: 'Coralogix provider allows you to send alerts from Coralogix to Keep using webhooks.' +--- +import AutoGeneratedSnippet from '/snippets/providers/coralogix-snippet-autogenerated.mdx'; + +## Overview + +Coralogix is a modern observability platform delivers comprehensive visibility into all your logs, metrics, traces and security events with end-to-end monitoring. 
+ +## Connecting Coralogix to Keep + +To connect Coralogix to Keep, you need to configure it as a webhook from Coralogix. Follow the steps below to set up the integration: + +1. From the Coralogix toolbar, navigate to Data Flow > Outbound Webhooks. + + + + + +2. In the Outbound Webhooks section, click Generic Webhook. + + + + + +3. Click Add New. + + + + + +4. Enter a webhook name and set the URL to `https://api.keephq.dev/alerts/event/coralogix`. +5. Select HTTP method (POST). + + + + + +6. Generate an API key with webhook role from the [Keep settings](https://platform.keephq.dev/settings?selectedTab=api-key). Copy the API key and paste it in the request header in the next step. + + + + + +7. Add a request header with the key "x-api-key" and API key as the value in coralogix webhook configuration. + + + + + +8. Edit the body of the messages that will be sent when the webhook is triggered (optional). +9. Save the configuration. + +## Useful Links + +- [Coralogix Website](https://coralogix.com/) + + + diff --git a/docs/providers/documentation/dash0-provider.mdx b/docs/providers/documentation/dash0-provider.mdx new file mode 100644 index 0000000000..1ea37decdc --- /dev/null +++ b/docs/providers/documentation/dash0-provider.mdx @@ -0,0 +1,84 @@ +--- +title: 'Dash0' +sidebarTitle: 'Dash0 Provider' +description: 'Dash0 provider allows you to get events from Dash0 using webhooks.' +--- +import AutoGeneratedSnippet from '/snippets/providers/dash0-snippet-autogenerated.mdx'; + +## Overview + +Dash0 is modern OpenTelemetry Native Observability, built on CNCF Open Standards such as PromQL, Perses and OTLP with full cost control. + +## Connecting Dash0 to Keep + +To connect Dash0 to Keep, you need to create a webhook in Dash0. + +1. Go to Dash0 dashboard and click on Organization settings. + + + + + +2. Click on `Notification Channels` and create a New notification channel of type `Webhook`. + + + + + +3. 
Give a name to the webhook and enter [https://api.keephq.dev/alerts/event/dash0](https://api.keephq.dev/alerts/event/dash0) as the URL. + +4. Follow the below steps to create a new API key in Keep. + +5. Go to Keep dashboard and click on the profile icon in the bottom left corner and click `Settings`. + + + + + +6. Select `Users and Access` tab and then select `API Keys` tab and create a new API key. + + + + + +7. Give a name and select the role as `webhook` and click on `Create API Key`. + + + + + +8. Copy the API key. + + + + + +9. Add a new request header with key `X-API-KEY` and value as the API key copied from Keep and save the webhook. + + + + + +10. Go to `Notifications` under `Alerting` and create a new notification rule if required or change the existing notification rule to use the webhook created. + + + + + +11. Go to `Checks` under `Alerting` and create a new check or edit an existing check to use the notification rule created. + + + + + + + + + +12. Now you will start receiving events in Keep from Dash0. + +## Useful Links + +- [Dash0](https://dash0.com/) + + diff --git a/docs/providers/documentation/databend-provider.mdx b/docs/providers/documentation/databend-provider.mdx new file mode 100644 index 0000000000..cae388fc54 --- /dev/null +++ b/docs/providers/documentation/databend-provider.mdx @@ -0,0 +1,16 @@ +--- +title: 'Databend' +sidebarTitle: 'Databend Provider' +description: 'Databend provider allows you to query databases' +--- +import AutoGeneratedSnippet from '/snippets/providers/databend-snippet-autogenerated.mdx'; + +## Overview + +Databend is an open-source, serverless, cloud-native data lakehouse built on object storage with a decoupled storage and compute architecture. It delivers exceptional performance and rapid elasticity, aiming to be the open-source alternative to Snowflake. 
+ +## Useful Links + +- [Databend](https://www.databend.com/) + + diff --git a/docs/providers/documentation/datadog-provider.mdx b/docs/providers/documentation/datadog-provider.mdx index 6aee49f465..492e15881d 100644 --- a/docs/providers/documentation/datadog-provider.mdx +++ b/docs/providers/documentation/datadog-provider.mdx @@ -3,20 +3,9 @@ title: "Datadog" sidebarTitle: "Datadog Provider" description: "Datadog provider allows you to query Datadog metrics and logs for monitoring and analytics." --- +import AutoGeneratedSnippet from '/snippets/providers/datadog-snippet-autogenerated.mdx'; -## Inputs - -- `query`: str: The query string to search within Datadog metrics and logs. -- `time_range`: dict = None: The time range for the query (e.g., `{'from': 'now-15m', 'to': 'now'}`) -- `source`: str = None: The source type (metrics, traces, logs). - -## Outputs - -_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ - -## Authentication Parameters - -The `api_key` and `app_key` are required for connecting to the Datadog provider. You can obtain them as described in the "Connecting with the Provider" section. + ## Connecting with the Provider @@ -42,23 +31,6 @@ To obtain the Datadog App Key, follow these steps: Fingerprints in Datadog are calculated based on the `groups` and `monitor_id` fields of an incoming/pulled event. -## Scopes - -Certain scopes may be required to perform specific actions or queries via the Datadog Provider. Below is a summary of relevant scopes and their use cases: - -- monitors_read (Monitors Read) - Required: True - Description: View monitors. -- monitors_write (Monitors Write) - Required: False - Description: Write monitors. (\*_Required for auto-webhook integration_) -- create_webhooks (Integrations Manage) - Required: False - Description: Create webhooks integrations. (\*_Required for auto-webhook integration_) -- metrics_read - Required: False - Description: View metrics. 
- ## Notes _No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ diff --git a/docs/providers/documentation/deepseek-provider.mdx b/docs/providers/documentation/deepseek-provider.mdx new file mode 100644 index 0000000000..cac19ceb0a --- /dev/null +++ b/docs/providers/documentation/deepseek-provider.mdx @@ -0,0 +1,22 @@ +--- +title: "DeepSeek Provider" +description: "The DeepSeek Provider enables integration of DeepSeek's language models into Keep." +--- +import AutoGeneratedSnippet from '/snippets/providers/deepseek-snippet-autogenerated.mdx'; + + + The DeepSeek Provider supports querying DeepSeek language models for prompt-based + interactions. + + + + +## Connecting with the Provider + +To connect to DeepSeek, you'll need to obtain an API Key: + +1. Sign up for an account at [DeepSeek](https://platform.deepseek.com) +2. Navigate to your account settings +3. Generate an API key for Keep + +Use the generated API key in the `authentication` section of your DeepSeek Provider configuration. 
\ No newline at end of file diff --git a/docs/providers/documentation/discord-provider.mdx b/docs/providers/documentation/discord-provider.mdx index f75da282b1..9258d94ea7 100644 --- a/docs/providers/documentation/discord-provider.mdx +++ b/docs/providers/documentation/discord-provider.mdx @@ -3,22 +3,9 @@ title: "Discord" sidebarTitle: "Discord Provider" description: "Discord provider is a provider that allows to send notifications to Discord" --- +import AutoGeneratedSnippet from '/snippets/providers/discord-snippet-autogenerated.mdx'; -## Inputs - -- content: str : Message text to send -- components: list[dict] = []: Adding styling or interactive components like emoji,buttons - -Note: for components to work, the webhook must be owned by an application - see https://discord.com/developers/docs/resources/webhook#execute-webhook - - -## Outputs - -_No information yet, feel free to contribute it using the "Edit this page" link the bottom of the page_ - -## Authentication Parameters - -The `webhook_url` associated with the channel requires to trigger the message to the respective channel. + ## Connecting with the Provider @@ -27,31 +14,6 @@ The `webhook_url` associated with the channel requires to trigger the message to - In the left-hand menu, click on "Integrations," and then click on "Webhooks." - Click the "Create Webhook" button, and give your webhook a name. -## Example of usgae - -``` -workflow: - id: discord-example - description: Discord example - triggers: - - type: manual - actions: - - name: discord - provider: - type: discord - config: "{{ providers.discordtest }}" - with: - content: Alerta! - components: - - type: 1 # Action row - components: - - type: 2 # Button - style: 1 # Primary style - label: "Click Me!" 
- custom_id: "button_click" - -``` - ## Useful Links - https://discord.com/developers/docs/resources/webhook#execute-webhook diff --git a/docs/providers/documentation/dynatrace-provider.mdx b/docs/providers/documentation/dynatrace-provider.mdx new file mode 100644 index 0000000000..ae757a2209 --- /dev/null +++ b/docs/providers/documentation/dynatrace-provider.mdx @@ -0,0 +1,18 @@ +--- +title: "Dynatrace" +sidebarTitle: "Dynatrace Provider" +description: "Dynatrace provider allows integration with Dynatrace for monitoring, alerting, and collecting metrics." +--- +import AutoGeneratedSnippet from '/snippets/providers/dynatrace-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +1. Log in to your Dynatrace account and navigate to "Settings" → "Integration" → "Dynatrace API." +2. Generate an API token with appropriate permissions (e.g., Read metrics). +3. Get your environment's Dynatrace URL. +4. Configure the Dynatrace provider using the API token and Dynatrace URL. + +## Useful Links +- [Dynatrace API Documentation](https://docs.dynatrace.com/docs/dynatrace-api) diff --git a/docs/providers/documentation/eks-provider.mdx b/docs/providers/documentation/eks-provider.mdx new file mode 100644 index 0000000000..32d34d7c5b --- /dev/null +++ b/docs/providers/documentation/eks-provider.mdx @@ -0,0 +1,72 @@ +--- +title: "EKS Provider" +description: "EKS provider integrates with AWS EKS and lets you interact with Kubernetes clusters hosted on EKS." +--- +import AutoGeneratedSnippet from '/snippets/providers/eks-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider +To connect to Amazon EKS, follow these steps: + +1. Log in to your [AWS Console](https://aws.amazon.com/) + +2. Create an IAM user with EKS permissions: +```bash +aws iam create-user --user-name eks-user +``` + +3. 
Attach required policies: + +```bash +aws iam attach-user-policy --user-name eks-user --policy-arn arn:aws:iam::aws:policy/AmazonEKSClusterPolicy +aws iam attach-user-policy --user-name eks-user --policy-arn arn:aws:iam::aws:policy/AmazonEKSServicePolicy +``` + +4. Create access keys + +```bash +aws iam create-access-key --user-name eks-user +``` + +You should get: + +``` +{ + "AccessKey": { + "AccessKeyId": "AKIAXXXXXXXXXXXXXXXX", + "SecretAccessKey": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "Status": "Active" + } +} +``` + +The `AccessKeyId` is your `access_key` and `SecretAccessKey` is your `secret_access_key`. + +5. Note your cluster name and region from the EKS console or using: + +```bash +aws eks list-clusters --region +``` + +## Required Permissions +The AWS IAM user needs these permissions: + +1. **eks:DescribeCluster** +2. **eks:ListClusters** + +Additional permissions for specific operations: + +3. **eks:AccessKubernetesApi** for pod/deployment operations +4. **eks:UpdateCluster** for scaling operations + +| Command | AWS IAM Permissions | +|---------|-------------------| +| `get_pods` | `eks:DescribeCluster`
`eks:AccessKubernetesApi` | +| `get_pvc` | `eks:DescribeCluster`
`eks:AccessKubernetesApi` | +| `get_node_pressure` | `eks:DescribeCluster`
`eks:AccessKubernetesApi` | +| `get_deployment` | `eks:DescribeCluster`
`eks:AccessKubernetesApi` | +| `scale_deployment` | `eks:DescribeCluster`
`eks:AccessKubernetesApi` | +| `exec_command` | `eks:DescribeCluster`
`eks:AccessKubernetesApi` | +| `restart_pod` | `eks:DescribeCluster`
`eks:AccessKubernetesApi` | +| `get_pod_logs` | `eks:DescribeCluster`
`eks:AccessKubernetesApi` | diff --git a/docs/providers/documentation/elastic-provider.mdx b/docs/providers/documentation/elastic-provider.mdx index d1b8ea4ece..796af80fbb 100644 --- a/docs/providers/documentation/elastic-provider.mdx +++ b/docs/providers/documentation/elastic-provider.mdx @@ -1,21 +1,11 @@ --- title: "Elastic" sidebarTitle: "Elastic Provider" -description: "Elastic provider is a provider used to query Elastic Search (tested with elastic.co)" +description: "Elastic provider is a provider used to query Elasticsearch (tested with elastic.co)" --- +import AutoGeneratedSnippet from '/snippets/providers/elastic-snippet-autogenerated.mdx'; -## Inputs - -- query: str | dict: The query to search Elastic Search with (either SQL/EQL) -- index: str = None: The index to search on (**If index is None, query must be SQL**) - -## Outputs - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Authentication Parameters - -The `api_key` and `cloud_id` are required for connecting to the Elastic provider. You can obtain them as described in the "Connecting with the Provider" section. + ## Connecting with the Provider @@ -35,11 +25,3 @@ To obtain the Elastic Cloud ID, follow these steps: 1. Log in to your elastic.co account 2. Go to the "Elasticsearch Service" section 3. Find the "Cloud ID" in the Overview page. 
- -## Notes - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Useful Links - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ diff --git a/docs/providers/documentation/flashduty-provider.mdx b/docs/providers/documentation/flashduty-provider.mdx new file mode 100644 index 0000000000..19aa262f8b --- /dev/null +++ b/docs/providers/documentation/flashduty-provider.mdx @@ -0,0 +1,30 @@ +--- +title: "Flashduty" +sidebarTitle: "Flashduty Provider" +description: "Flashduty docs" +--- +import AutoGeneratedSnippet from '/snippets/providers/flashduty-snippet-autogenerated.mdx'; + +![Flashduty](/images/flashduty_1.png) + + + +## Integration Key Generation + +The Flashduty gets integration key as an authentication method + +1.Enter the Flashduty console, select Integration Center => Alert Events to enter the integration selection page + +![Flashduty](/images/flashduty_2.png) + +2.Select Keep integration +3.Define a name for the current integration +4.Configure default routing and select the corresponding channel +5.Copy the integration Key to Keep +6.Complete the integration configuration + +![Flashduty](/images/flashduty_3.png) + +## Useful Links + +- https://docs.flashcat.cloud/en/flashduty/keep-alert-integration-guide?nav=01JCQ7A4N4WRWNXW8EWEHXCMF5 diff --git a/docs/providers/documentation/fluxcd-provider.mdx b/docs/providers/documentation/fluxcd-provider.mdx new file mode 100644 index 0000000000..f59bb6c1da --- /dev/null +++ b/docs/providers/documentation/fluxcd-provider.mdx @@ -0,0 +1,140 @@ +--- +title: "Flux CD" +sidebarTitle: "Flux CD Provider" +description: "Flux CD Provider enables integration with Flux CD for GitOps topology and alerts." 
+--- +import AutoGeneratedSnippet from '/snippets/providers/fluxcd-snippet-autogenerated.mdx'; + + + +## Overview + +Flux CD is a GitOps tool for Kubernetes that provides continuous delivery through automated deployment, monitoring, and management of applications. This provider allows you to integrate Flux CD with Keep to get a single pane of glass for monitoring your GitOps deployments. + +## Features + +### Topology + +The Flux CD provider pulls topology data from the following Flux CD resources: + +- GitRepositories +- HelmRepositories +- HelmCharts +- OCIRepositories +- Buckets +- Kustomizations +- HelmReleases + +The topology shows the relationships between these resources, allowing you to visualize the GitOps deployment process. Resources are categorized as: + +- **Source**: GitRepositories, HelmRepositories, OCIRepositories, Buckets +- **Deployment**: Kustomizations, HelmReleases + +### Alerts + +The Flux CD provider gets alerts from two sources: + +1. Kubernetes events related to Flux CD controllers +2. Status conditions of Flux CD resources (GitRepositories, Kustomizations, HelmReleases) + +Alerts include: + +- Failed GitRepository operations +- Failed Kustomization operations +- Failed HelmRelease operations +- Non-ready resources + +Alert severity is determined based on: +- **Critical**: Events with "failed", "error", "timeout", "backoff", or "crash" in the reason +- **High**: Other warning events +- **Info**: Normal events + +## Connecting with the Provider + +The Flux CD provider supports multiple authentication methods: + +1. **Kubeconfig file content** (recommended for external access) +2. **API server URL and token** +3. **In-cluster configuration** (when running inside a Kubernetes cluster) +4. 
**Default kubeconfig file** (from ~/.kube/config) + +### Using Kubeconfig + +```yaml +apiVersion: keep.sh/v1 +kind: Provider +metadata: + name: flux-cd +spec: + type: fluxcd + authentication: + kubeconfig: | + apiVersion: v1 + kind: Config + clusters: + - name: my-cluster + cluster: + server: https://kubernetes.example.com + certificate-authority-data: BASE64_ENCODED_CA_CERT + users: + - name: my-user + user: + token: MY_TOKEN + contexts: + - name: my-context + context: + cluster: my-cluster + user: my-user + current-context: my-context + context: my-context + namespace: flux-system +``` + +### Using API Server and Token + +```yaml +apiVersion: keep.sh/v1 +kind: Provider +metadata: + name: flux-cd +spec: + type: fluxcd + authentication: + api-server: https://kubernetes.example.com + token: MY_TOKEN + namespace: flux-system +``` + +> Note: Both `api-server` and `api_server` formats are supported for backward compatibility. + +### Using In-Cluster Configuration + +```yaml +apiVersion: keep.sh/v1 +kind: Provider +metadata: + name: flux-cd +spec: + type: fluxcd + authentication: + namespace: flux-system +``` + +## Comparison with ArgoCD Provider + +Keep supports both Flux CD and ArgoCD for GitOps deployments. 
Here's a comparison of the two providers: + +| Feature | Flux CD | ArgoCD | +|---------|---------|--------| +| Topology | ✅ | ✅ | +| Alerts | ✅ | ✅ | +| Resource Types | GitRepositories, HelmRepositories, Kustomizations, HelmReleases | Applications, Projects | +| Authentication | Kubeconfig, API Server, In-Cluster | Username/Password, Token | +| Deployment Model | Kubernetes Controllers | Server + Controllers | +| UI Integration | No (CLI only) | Yes (Web UI) | + +## Related Resources + +- [Flux CD Documentation](https://fluxcd.io/docs/) +- [Flux CD GitHub Repository](https://github.com/fluxcd/flux2) +- [Keep Documentation](https://docs.keephq.dev) diff --git a/docs/providers/documentation/gcpmonitoring-provider.mdx b/docs/providers/documentation/gcpmonitoring-provider.mdx index 8a31e79c0a..6e449bb4d0 100644 --- a/docs/providers/documentation/gcpmonitoring-provider.mdx +++ b/docs/providers/documentation/gcpmonitoring-provider.mdx @@ -1,78 +1,110 @@ --- title: "GCP Monitoring" sidebarTitle: "GCP Monitoring Provider" -description: "GCP Monitoringing provider allows you to get alerts from Azure Monitoring via webhooks." +description: "GCP Monitoring provider allows you to get alerts and logs from GCP Monitoring via webhooks and log queries." --- +import AutoGeneratedSnippet from '/snippets/providers/gcpmonitoring-snippet-autogenerated.mdx'; ## Overview -The GCP Monitoring Provider enables seamless integration between Keep and GCP Monitoring, allowing alerts from GCP Monitoring to be directly sent to Keep through webhook configurations. This integration ensures that critical alerts are efficiently managed and responded to within Keep's platform. + +The GCP Monitoring Provider enables seamless integration between Keep and GCP Monitoring, allowing alerts from GCP Monitoring to be directly sent to Keep through webhook configurations. 
In addition to alerts, the provider now supports querying log entries from GCP Logging, enabling a comprehensive view of alerts and associated logs within Keep's platform. ## Connecting GCP Monitoring to Keep -To connect GCP Monitoring to Keep, you'll need to configure a webhook as a notification channel in GCP Monitoring and then link it to the desired alert policy. + +### Alert Integration via Webhook + +To connect GCP Monitoring alerts to Keep, configure a webhook as a notification channel in GCP Monitoring and link it to the desired alert policy. ### Step 1: Access Notification Channels + Log in to the Google Cloud Platform console. Navigate to **Monitoring > Alerting > Notification channels**. - - + + ### Step 2: Add a New Webhook + Within the Webhooks section, click on **ADD NEW**. - - + + ### Step 3: Configure the Webhook + In the Endpoint URL field, enter the webhook URL provided by Keep. -- For Display Name, use keep-gcpmonitoring-webhook-integration. -- Enable Use HTTP Basic Auth and input the following credentials: - - Auth Username: **api_key** - - Auth Password: **%YOURAPIKEY%** - - - + +- **Display Name**: keep-gcpmonitoring-webhook-integration +- Enable **Use HTTP Basic Auth** and input the following credentials: + - **Auth Username**: `api_key` + - **Auth Password**: `%YOURAPIKEY%` + + + ### Step 4: Save the Webhook Configuration -- Click on Save to store the webhook configuration. + +- Click **Save** to store the webhook configuration. ### Step 5: Associate the Webhook with an Alert Policy Navigate to the alert policy you wish to send notifications from to Keep. -- Click on Edit. -- Under "Notifications and name," find the Notification Channels section and select the keep-gcpmonitoring-webhook-integration channel you created. -- Save the changes by clicking on SAVE POLICY. - - - - +- Click **Edit**. 
+- Under "Notifications and name," find the **Notification Channels** section and select the `keep-gcpmonitoring-webhook-integration` channel you created. +- Save the changes by clicking on **SAVE POLICY**. - - + + -### Step 6: Review the alert in Keep + + + - - +### Step 6: Review the Alert in Keep + +Once the setup is complete, alerts from GCP Monitoring will start appearing in Keep. + + + +## Log Query Integration + +The GCP Monitoring Provider also supports querying logs from GCP Logging, allowing you to fetch log entries based on specific filters. This is helpful for enriching alert data with related logs or for monitoring specific events in Keep. + +### Authentication Requirements + +To enable log querying, you need to provide a service account JSON file with the `logs.viewer` role. This service account should be configured in the `authentication` section of your GCP Monitoring Provider configuration. + +### Querying Logs + +The provider’s `query` function supports filtering logs based on criteria such as resource type, severity, or specific keywords. You can specify a time range for querying logs using `timedelta_in_days`, and control the number of entries with `page_size`. + +#### Example Usage + +Here’s an example of how you might use the provider to query log entries: + +```python +query(filter='resource.type="cloud_run_revision" AND severity="ERROR"', timedelta_in_days=1) +``` + +This will return logs of severity “ERROR” related to Cloud Run revisions from the past day. + +#### Post Installation Validation + +To validate both alerts and logs, follow these steps: + + 1. Alert Validation: Test the webhook by triggering an alert in GCP Monitoring and confirm it appears in Keep. + 2. Log Query Validation: Execute a simple log query and verify that log entries are returned as expected. 
+ ### Useful Links - - [GCP Monitoring Notification Channels](https://cloud.google.com/monitoring/support/notification-options) - - [GCP Monitoring Alerting](https://cloud.google.com/monitoring/alerts) + +- [GCP Monitoring Notification Channels](https://cloud.google.com/monitoring/support/notification-options) +- [GCP Monitoring Alerting](https://cloud.google.com/monitoring/alerts) + + diff --git a/docs/providers/documentation/gemini-provider.mdx b/docs/providers/documentation/gemini-provider.mdx new file mode 100644 index 0000000000..17d76ca98c --- /dev/null +++ b/docs/providers/documentation/gemini-provider.mdx @@ -0,0 +1,22 @@ +--- +title: "Gemini Provider" +description: "The Gemini Provider allows for integrating Google's Gemini language models into Keep." +--- +import AutoGeneratedSnippet from '/snippets/providers/gemini-snippet-autogenerated.mdx'; + + + The Gemini Provider supports querying Gemini language models for prompt-based + interactions. + + + + +## Connecting with the Provider + +To connect to Gemini, you'll need to obtain an API Key: + +1. Go to [Google AI Studio](https://makersuite.google.com/app/apikey). +2. Click on **Create API Key** or use an existing one. +3. Copy your API key for Keep. + +Use the generated API key in the `authentication` section of your Gemini Provider configuration. \ No newline at end of file diff --git a/docs/providers/documentation/github-provider.mdx b/docs/providers/documentation/github-provider.mdx new file mode 100644 index 0000000000..f6331b8bfc --- /dev/null +++ b/docs/providers/documentation/github-provider.mdx @@ -0,0 +1,18 @@ +--- +title: "GitHub" +sidebarTitle: "GitHub Provider" +description: "GitHub provider allows integration with GitHub for managing repositories, issues, pull requests, and more." +--- +import AutoGeneratedSnippet from '/snippets/providers/github-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +1. 
Go to your GitHub account and navigate to **Settings > Developer Settings > Personal Access Tokens**. +2. Generate a token with the required permissions (e.g., `repo`, `workflow`, etc.). +3. Copy the token and provide it as `github_token` in the provider configuration. + +## Useful Links +- [GitHub REST API Documentation](https://docs.github.com/en/rest?apiVersion=2022-11-28) + diff --git a/docs/providers/documentation/github_workflows_provider.mdx b/docs/providers/documentation/github_workflows_provider.mdx index a17721aeea..e48d35a6c3 100644 --- a/docs/providers/documentation/github_workflows_provider.mdx +++ b/docs/providers/documentation/github_workflows_provider.mdx @@ -3,24 +3,9 @@ title: "Github Workflows" sidebarTitle: "Github Workflows Provider" description: "GithubWorkflowProvider is a provider that interacts with Github Workflows API." --- +import AutoGeneratedSnippet from '/snippets/providers/github_workflows-snippet-autogenerated.mdx'; -## Configuration - -The `kwargs` of the `notify` function in **GithubWorkflowProvider** contains the following Parameters -```python -kwargs(dict): - github_url(str): API endpoint to send the request to. (Required*) - github_method(str): GET | POST | DELETE | PUT -``` -Basically the kwargs will be automatically populated by the variables passed under `with` in the workflow file. - -## Outputs - -It returns the the response of the query. - -### Authentication Parameters - -A Github Personal Access Token `GITHUB_PAT` associated with the github account is required to perform the required action. + ## Connecting with the Provider @@ -37,10 +22,6 @@ Create your personal access token (classic) in github See bellow for more info. 
-## Notes - -_No information yet, feel free to contribute it using the "Edit this page" link the bottom of the page_ - ## Useful Links - [Workflows](https://docs.github.com/en/rest/actions/workflows) diff --git a/docs/providers/documentation/gitlab-provider.mdx b/docs/providers/documentation/gitlab-provider.mdx index 65011c8bc2..f32e50c293 100644 --- a/docs/providers/documentation/gitlab-provider.mdx +++ b/docs/providers/documentation/gitlab-provider.mdx @@ -3,26 +3,9 @@ title: "GitLab Provider" sidebarTitle: "GitLab Provider" description: "GitLab provider is a provider used for creating issues in GitLab" --- +import AutoGeneratedSnippet from '/snippets/providers/gitlab-snippet-autogenerated.mdx'; -## Inputs - -The `notify` function take following parameters as inputs: - -- `id` (required): The global ID or path of the project. -- `title` (required): Title of the Issue/Ticket. -- `description` (optional): Description for the Issue. -- `labels` (optional): Issue labels seperated by a Comma. -- `issue_type` (optional): Issue type name. One of `issue`, `incident`, `test_case` or `task`. Default is `issue`. - -See [documentation](https://docs.gitlab.com/ee/api/issues.html#new-issue) for more - -## Authentication Parameters -The GitLab provider requires the following authentication parameter: - -- `host` (required): GitLab host name of the project. -- `Personal Access Token` (required): Your Personal Access Token with `api` scope. - -See [GitLab Scopes](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#personal-access-token-scopes) for more. 
+ ## Connecting with the Provider @@ -34,3 +17,4 @@ See [GitLab Scopes](https://docs.gitlab.com/ee/user/profile/personal_access_toke - [GitLab PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token) - [GitLab Create New Issue](https://docs.gitlab.com/ee/api/issues.html#new-issue) +- [GitLab Scopes](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#personal-access-token-scopes) diff --git a/docs/providers/documentation/gitlabpipelines-provider.mdx b/docs/providers/documentation/gitlabpipelines-provider.mdx index e112e3f799..719b080fae 100644 --- a/docs/providers/documentation/gitlabpipelines-provider.mdx +++ b/docs/providers/documentation/gitlabpipelines-provider.mdx @@ -1,30 +1,15 @@ --- -title: "Gitlab Pipelines" -sidebarTitle: "Gitlab Pipelines Provider" -description: "GitlabPipelinesProvider is a provider that interacts with GitLab Pipelines API." +title: "GitLab Pipelines" +sidebarTitle: "GitLab Pipelines Provider" +description: "GitLab Pipelines Provider is a provider that interacts with GitLab Pipelines API." --- +import AutoGeneratedSnippet from '/snippets/providers/gitlabpipelines-snippet-autogenerated.mdx'; -## Inputs - -The `kwargs` of the `notify` function in **GitlabPipelinesProvider** contains the following Parameters -```python -kwargs(dict): - gitlab_url(str): API endpoint to send the request to. (Required*) - gitlab_method(str): GET | POST | DELETE | PUT -``` -Basically the kwargs will be automatically populated by the variables passed under `with` in the workflow file. - -## Outputs - -It prints the output in accordance with the response in the following format `Sent {method} request to {url} with status {response_status}` - -## Authentication Parameters - -A Gitlab Personal Access Token `GITLAB_PAT` associated with the gitlab account is required to perform the required action. 
+ ## Connecting with the Provider -Create your personal access token in gitlab +Create your personal access token in GitLab - On the left sidebar, select your avatar. - Select **Edit profile**. - On the left sidebar, select **Access Tokens**. @@ -33,10 +18,7 @@ Create your personal access token in gitlab - Select the desired scopes. - Select Create **personal access token**. -## Notes - -_No information yet, feel free to contribute it using the "Edit this page" link the bottom of the page_ - ## Useful Links -- https://docs.gitlab.com/ee/api/pipelines.html +- [GitLab PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token) +- [GitLab Pipelines API](https://docs.gitlab.com/ee/api/pipelines.html) diff --git a/docs/providers/documentation/gke-provider.mdx b/docs/providers/documentation/gke-provider.mdx new file mode 100644 index 0000000000..2aa1b342e4 --- /dev/null +++ b/docs/providers/documentation/gke-provider.mdx @@ -0,0 +1,18 @@ +--- +title: "Google Kubernetes Engine" +sidebarTitle: "Google Kubernetes Engine Provider" +description: "Google Kubernetes Engine provider allows managing Google Kubernetes Engine clusters and related resources." +--- +import AutoGeneratedSnippet from '/snippets/providers/gke-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +1. Obtain Google Cloud credentials by following the steps in [Google Cloud's service account guide](https://cloud.google.com/iam/docs/creating-managing-service-account-keys). +2. Ensure your service account has the necessary permissions to manage GKE clusters (`roles/container.admin`). +3. Provide the `gcp_credentials`, `project_id`, and `zone` in your provider configuration. 
+ +## Useful Links +- [Google Kubernetes Engine Documentation](https://cloud.google.com/kubernetes-engine/docs) + diff --git a/docs/providers/documentation/google_chat-provider.mdx b/docs/providers/documentation/google_chat-provider.mdx index 486e81856d..952770c130 100644 --- a/docs/providers/documentation/google_chat-provider.mdx +++ b/docs/providers/documentation/google_chat-provider.mdx @@ -3,18 +3,9 @@ title: "Google Chat" sidebarTitle: "Google Chat Provider" description: "Google Chat provider is a provider that allows to send messages to Google Chat" --- +import AutoGeneratedSnippet from '/snippets/providers/google_chat-snippet-autogenerated.mdx'; -## Inputs - -The `notify` function take following parameters as inputs: - -- `message`: Required. Message text to send to Google Chat - -## Outputs - -## Authentication Parameters - -The webhook_url associated with the channel requires to trigger the message to the respective Google Chat space. + ## Connecting with the Provider @@ -27,7 +18,6 @@ The webhook_url associated with the channel requires to trigger the message to t 7. Click Save 8. To copy the webhook URL, click "More", and then click "Copy link". -## Notes ## Useful Links diff --git a/docs/providers/documentation/grafana-provider.mdx b/docs/providers/documentation/grafana-provider.mdx index e47bf67cee..2256200a9f 100644 --- a/docs/providers/documentation/grafana-provider.mdx +++ b/docs/providers/documentation/grafana-provider.mdx @@ -1,23 +1,34 @@ --- title: "Grafana Provider" -description: "Grafana Provider allows either pull/push alerts from Grafana to Keep." +description: "Grafana Provider allows either pull/push alerts and pull Topology Map from Grafana to Keep." --- -Grafana currently supports pulling/pushing alerts. We will add querying and notifying soon. +import AutoGeneratedSnippet from '/snippets/providers/grafana-snippet-autogenerated.mdx'; -## Inputs +Grafana currently supports pulling/pushing alerts & Topology Map. 
We will add querying and notifying soon. -Grafana Provider does not currently support the `notify` function. + -## Outputs +## Legacy vs Unified Alerting -Grafana Provider does not currently support the `query` function. +Keep supports both Grafana's legacy alerting system and the newer Unified Alerting system. Here are the key differences: -## Authentication Parameters +### Legacy Alerting +- Uses notification channels for alert delivery +- Configured at the dashboard level +- Uses a different API endpoint (`/api/alerts` and `/api/alert-notifications`) +- Simpler setup but fewer features +- Alerts are tightly coupled with dashboard panels -The Grafana Provider uses API token authentication. You need to provide the following authentication parameters to connect to Grafana: +### Unified Alerting (Default from Grafana 9.0) +- Uses alert rules and contact points +- Configured centrally in the Alerting section +- Uses the newer `/api/v1/alerts` endpoint +- More powerful features including label-based routing +- Supports multiple data sources in a single alert rule -- **token** (required): Your Grafana API Token. -- **host** (required): The URL of your Grafana host (e.g., https://keephq.grafana.net). + +If you're using Grafana 8.x or earlier, or have explicitly enabled legacy alerting in newer versions, make sure to configure Keep accordingly using the legacy alerting configuration. + ## Connecting with the Provider @@ -41,38 +52,67 @@ To connect to Grafana, you need to create an API Token: height="200"> -6. Use the token value to the `authentication` section in the Grafana Provider configuration. +6. Use the token value in the `authentication` section in the Grafana Provider configuration. -## Post installation validation +## Post Installation Validation -You can check that the Grafana Provider works by testing Keep's contact point (which installed via the webhook integration). -1. Go to Contact Points (cmd k -> contact) -2. 
Find the keep-grafana-webhook-integration: +You can check that the Grafana Provider works by testing Keep's contact point (which was installed via the webhook integration). + +1. Go to **Contact Points** (cmd k -> contact). +2. Find the **keep-grafana-webhook-integration**: -3. Click on the "View contact point": +3. Click on the **View contact point**: -4. Click on "Test": +4. Click on **Test**: -5. Go to Keep - you should see an alert from Grafana! +5. Go to Keep – you should see an alert from Grafana! + +**Alternative Validation Methods (When Keep is Not Accessible Externally):** + +If Keep is not accessible externally and the webhook cannot be created, you can manually validate the Grafana provider setup using the following methods: + +1. **Manual Test Alerts in Grafana:** + - Create a manual test alert in Grafana. + - Set up a contact point within Grafana that would normally send alerts to Keep. + - Trigger the alert and check Grafana's logs for errors or confirmation that the alert was sent. + +2. **Check Logs in Grafana:** + - Access Grafana’s log files or use the **Explore** feature to query logs related to the alerting mechanism. + - Ensure there are no errors related to the webhook integration and that alerts are processed correctly. + +3. **Verify Integration Status:** + - Navigate to the **Alerting** section in Grafana. + - Confirm that the integration status shows as active or functioning. + - Monitor any outbound HTTP requests to verify that Grafana is attempting to communicate with Keep. + +4. **Network and Connectivity Check:** + - Use network monitoring tools to ensure Grafana can reach Keep or any alternative endpoint configured for alerts. + + +**Topology Map** is generated from the traces collect by Tempo. +To get the Datasource UID, go to: +1. Connections > Data Sources. +2. Click the Prometheus instance which is scraping data from Tempo > Your URL is in the format `https://host/connections/datasources/edit/` +3. 
Copy that DATASOURCE_UID and use it while installing the provider. + ## Webhook Integration Modifications -The webhook integration adds Keep as a contact point in the Grafana instance. This integration can be located under the "Contact Points" section. -Keep also gains access to the following scopes: +The webhook integration adds Keep as a contact point in the Grafana instance. This integration can be located under the "Contact Points" section. Keep also gains access to the following scopes: - `alert.provisioning:read` - `alert.provisioning:write` diff --git a/docs/providers/documentation/grafana_incident-provider.mdx b/docs/providers/documentation/grafana_incident-provider.mdx index a59d13df70..7bfee92fd6 100644 --- a/docs/providers/documentation/grafana_incident-provider.mdx +++ b/docs/providers/documentation/grafana_incident-provider.mdx @@ -3,14 +3,9 @@ title: 'Grafana Incident Provider' sidebarTitle: 'Grafana Incident Provider' description: 'Grafana Incident Provider alows you to query all incidents from Grafana Incident.' --- +import AutoGeneratedSnippet from '/snippets/providers/grafana_incident-snippet-autogenerated.mdx'; -## Authentication Parameters - -The Grafana Incident provider requires the following authentication parameters: - -- `host_url` - The URL of the Grafana Incident instance. - Example: `https://your-stack.grafana.net` -- `service_account_token` - The service account token is used to authenticate the Grafana Incident API requests. + ## Getting started @@ -27,6 +22,70 @@ The Grafana Incident provider requires the following authentication parameters: 5. Copy the generated token. 6. This will be used as the `service_account_token` parameter in the provider configuration. +## Creating and updating Grafana Incidents + +Grafana Incident provider supports creating and updating incidents in Grafana. + +- `operationType` - The operation type can be `create` or `update`. 
+- `updateType` - The update type is used to update the various fields of the incident. + +### Create Incident + +- `operationType` - `create` +- `title` (str) - The title of the incident. +- `severity` (str) - The severity of the incident. +- `labels` (list) - The labels of the incident. +- `roomPrefix` (str) - The room prefix of the incident. +- `isDrill` (bool) - The drill status of the incident. +- `status` (str) - The status of the incident. +- `attachCaption` (str) - The attachment caption of the incident. +- `attachURL` (str) - The attachment URL of the incident. + +### Update Incident + +- `operationType` - `update` +- `updateType` - The updatable fields are `removeLabel`, `unassignLabel`, `unassignLabelByUUID`, `unassignRole`, `updateIncidentEventTime`, `updateIncidentIsDrill`, `updateIncidentSeverity`, `updateIncidentStatus`, `updateIncidentTitle`. + +#### Remove Label +- `incident_id` (str) - The incident ID. +- `label` (str) - The label to remove. + +#### Unassign Label +- `incident_id` (str) - The incident ID. +- `label` (str) - The label to unassign. +- `key` (str) - The key of the label to unassign. + +#### Unassign Label By UUID +- `incident_id` (str) - The incident ID. +- `key_uuid` (str) - The key UUID of the label to unassign. +- `value_uuid` (str) - The value UUID of the label to unassign. + +#### Unassign Role +- `incident_id` (str) - The incident ID. +- `role` (str) - The role to unassign. +- `user_id` (str) - The user ID to unassign. + +#### Update Incident Event Time +- `incident_id` (str) - The incident ID. +- `event_time` (str) - The event time to update. +- `event_name` (str) - The event name to update. + +#### Update Incident Is Drill +- `incident_id` (str) - The incident ID. +- `isDrill` (bool) - The drill status to update. + +#### Update Incident Severity +- `incident_id` (str) - The incident ID. +- `severity` (str) - The severity to update. + +#### Update Incident Status +- `incident_id` (str) - The incident ID. 
+- `status` (str) - The status to update. + +#### Update Incident Title +- `incident_id` (str) - The incident ID. +- `title` (str) - The title to update. + ## Usefull Links - [Grafana Incident](https://grafana.com/docs/grafana-cloud/alerting-and-irm/incident/) diff --git a/docs/providers/documentation/grafana_loki-provider.mdx b/docs/providers/documentation/grafana_loki-provider.mdx new file mode 100644 index 0000000000..8041095bfc --- /dev/null +++ b/docs/providers/documentation/grafana_loki-provider.mdx @@ -0,0 +1,44 @@ +--- +title: 'Grafana Loki' +sidebarTitle: 'Grafana Loki Provider' +description: 'Grafana Loki provider allows you to query logs from Grafana Loki.' +--- +import AutoGeneratedSnippet from '/snippets/providers/grafana_loki-snippet-autogenerated.mdx'; + +## Overview + +Grafana Loki is a log aggregation system designed to store and query logs from all your applications and infrastructure. The easiest way to get started is with Grafana Cloud, our fully composable observability stack. + + + +## Connecting with the Grafana Loki provider + +1. Obtain the required authentication parameters. +2. Add Grafana Loki provider to your keep account and configure with the above authentication parameters. + +## Querying Grafana Loki + +The Grafana Loki provider allows you to query logs from Grafana Loki through the `query` and `query_range` types. The following are the parameters available for querying: + +1. `query` type: + + - `query`: The [LogQL](https://grafana.com/docs/loki/latest/query/) query to perform. Requests that do not use valid LogQL syntax will return errors. + - `limit`: The max number of entries to return. It defaults to `100`. Only applies to query types which produce a stream (log lines) response. + - `time`: The evaluation time for the query as a nanosecond Unix epoch or another [supported format](https://grafana.com/docs/loki/latest/reference/loki-http-api/#timestamps). Defaults to now. + - `direction`: Determines the sort order of logs. 
Supported values are `forward` or `backward`. Defaults to `backward`. + +2. `query_range` type: + + - `query`: The [LogQL](https://grafana.com/docs/loki/latest/query/) query to perform. + - `limit`: The max number of entries to return. It defaults to `100`. Only applies to query types which produce a stream (log lines) response. + - `start`: The start time for the query as a nanosecond Unix epoch or another [supported format](https://grafana.com/docs/loki/latest/reference/loki-http-api/#timestamps). Defaults to one hour ago. Loki returns results with timestamp greater or equal to this value. + - `end`: The end time for the query as a nanosecond Unix epoch or another [supported format](https://grafana.com/docs/loki/latest/reference/loki-http-api/#timestamps). Defaults to now. Loki returns results with timestamp lower than this value. + - `since`: A `duration` used to calculate `start` relative to `end`. If `end` is in the future, `start` is calculated as this duration before now. Any value specified for `start` supersedes this parameter. + - `step`: Query resolution step width in `duration` format or float number of seconds. `duration` refers to Prometheus duration strings of the form `[0-9]+[smhdwy]`. For example, 5m refers to a duration of 5 minutes. Defaults to a dynamic value based on `start` and `end`. Only applies to query types which produce a matrix response. + - `interval`: Only return entries at (or greater than) the specified interval, can be a `duration` format or float number of seconds. Only applies to queries which produce a stream response. Not to be confused with step, see the explanation under [Step versus interval](https://grafana.com/docs/loki/latest/reference/loki-http-api/#step-versus-interval). + - `direction`: Determines the sort order of logs. Supported values are `forward` or `backward`. Defaults to `backward`. 
+ +## Useful Links + +- [Grafana Loki](https://grafana.com/oss/loki/) +- [Grafana Loki Authentication](https://grafana.com/docs/loki/latest/operations/authentication/) diff --git a/docs/providers/documentation/grafana_oncall-provider.mdx b/docs/providers/documentation/grafana_oncall-provider.mdx index 0124acae23..faf38d4414 100644 --- a/docs/providers/documentation/grafana_oncall-provider.mdx +++ b/docs/providers/documentation/grafana_oncall-provider.mdx @@ -1,101 +1,40 @@ --- -title: "Grafana Oncall Provider" -description: "Grafana Oncall Provider is a class that allows to ingest/digest data from Grafana On-Call." +title: "Grafana OnCall Provider" +description: "Grafana Oncall Provider is a class that allows to ingest data to the Grafana OnCall." --- +import AutoGeneratedSnippet from '/snippets/providers/grafana_oncall-snippet-autogenerated.mdx'; -## Inputs - -- **title** (required): The title of the incident. -- **roomPrefix** (optional): Prefix for the incident room (default: "incident"). -- **labels** (optional): List of labels to associate with the incident (default: ["keep-generated"]). -- **isDrill** (optional): Whether the incident is a drill or not (default: False). -- **severity** (optional): Severity of the incident (default: "minor"). -- **status** (optional): Status of the incident (default: "active"). -- **attachCaption** (optional): Caption for any attachment. -- **attachURL** (optional): URL for any attachment. -- **incidentID** (optional): ID of an existing incident to update. - -## Outputs - -Grafana Oncall Provider does not currently support the `query` function. - -## Authentication Parameters - -The Grafana Oncall Provider uses API token authentication. You need to provide the following authentication parameters to connect to Grafana On-Call: - -- **token** (required): Your Grafana On-Call API Token. -- **host** (required): The URL of your Grafana On-Call host (e.g., https://keephq.grafana.net). 
+ ## Connecting with the Provider -To connect to Grafana On-Call, you need to create an API Token: +To connect to Grafana OnCall, you need to create an API Token: -1. Log in to your Grafana On-Call account. -2. Go to the **API Tokens** page. -3. Click the **Generate Token** button and provide a name for your token. -4. Copy the token value and keep it secure. -5. Add the token value to the `authentication` section in the Grafana Oncall Provider configuration. +1. Log in to your Grafana account. +2. Go To "Alerts & IRM" -> OnCall. +3. Go to the **Settings** page. +4. Click the **Create** button and provide a name for your token. +5. Copy the token value and keep it secure. +6. Add the token value to the `authentication` section in the Grafana Oncall Provider configuration. ## Notes -- This provider allows you to interact with Grafana On-Call to create or update incidents. -- The `random_color` function generates a random color for incident labels. -- The `startTime` and `endTime` parameters use ISO-8601 format. -- The `notify` function returns information about the incident created or updated. +- This provider allows you to interact with Grafana OnCall to create alerts. +- Keep will create "Webhook" type integration called "Keep Integration" inside Grafana OnCall. 
Payload example: ```json { - "incident": { - "incidentID": "4", - "severity": "minor", - "labels": [ - { - "label": "keep-generated", - "description": "keep-generated", - "colorHex": "#9E0847" - } - ], - "isDrill": false, - "createdTime": "2023-09-10T10:31:58.030369Z", - "modifiedTime": "2023-09-10T10:31:58.030369Z", - "createdByUser": { - "userID": "grafana-incident:user-64fd801847a9191105b3c2df", - "name": "Service Account: keep-tests", - "photoURL": "https://www.gravatar.com/avatar/dbb34057685b3bc2bdc2a2808ec80772?s=512&d=retro" - }, - "closedTime": "", - "durationSeconds": 0, - "status": "active", - "title": "Test Incident", - "overviewURL": "/a/grafana-incident-app/incidents/4/test-incident", - "roles": [], - "taskList": { - "tasks": [ - { - "taskID": "must-choose-severity", - "immutable": true, - "createdTime": "2023-09-10T10:31:58.005917795Z", - "modifiedTime": "2023-09-10T10:31:58.005922353Z", - "text": "Specify incident severity", - "status": "done", - "authorUser": null, - "assignedUser": null - } - ], - "todoCount": 0, - "doneCount": 1 - }, - "summary": "", - "heroImagePath": "/api/hero-images/548564/uoKQrUg5gxteZJ6SdFrMOEhBiN6JtLHLmCSqDzDD0SX93NAhe6ChvhLORmTrSqbC2SEzde7YSKa94UcRsoizm45y3nCGv7eq7Zolk0Y5MzDJrhZRkwrksitQm2eR4iEV/v3/4.png", - "incidentStart": "2023-09-10T10:31:58.030369Z", - "incidentEnd": "" - } + "alert_uid": "08d6891a-835c-e661-39fa-96b6a9e26552", + "title": "The whole system is down", + "image_url": "https://upload.wikimedia.org/wikipedia/commons/e/ee/Grumpy_Cat_by_Gage_Skidmore.jpg", + "state": "alerting", + "link_to_upstream_details": "https://en.wikipedia.org/wiki/Downtime", + "message": "Smth happened. Oh no!" 
} ``` ## Useful Links -- [Grafana On-Call](https://keephq.grafana.net) -- [Grafana On-Call API Documentation](https://keephq.grafana.net/docs/api) +- [Grafana OnCall Inbound Webhook Integration](https://grafana.com/docs/oncall/latest/configure/integrations/references/webhook/) diff --git a/docs/providers/documentation/graylog-provider.mdx b/docs/providers/documentation/graylog-provider.mdx new file mode 100644 index 0000000000..acb4d32e87 --- /dev/null +++ b/docs/providers/documentation/graylog-provider.mdx @@ -0,0 +1,39 @@ +--- +title: "Graylog Provider" +sidebarTitle: "Graylog Provider" +description: "The Graylog provider enables webhook installations for receiving alerts in Keep" +--- +import AutoGeneratedSnippet from '/snippets/providers/graylog-snippet-autogenerated.mdx'; + +## Overview + +The **Graylog Provider** facilitates receiving alerts from Graylog by setting up Webhook connections. It allows seamless integration with Graylog to receive notifications about events and alerts through Keep. + + + +## Connecting with the Provider + +1. Obtain the **username** and **access token** from your Graylog instance by following [Graylog's API Access Documentation](https://go2docs.graylog.org/current/setting_up_graylog/rest_api_access_tokens.htm?tocpath=Set%20up%20Graylog%7CGet%20Started%20with%20Graylog%7CREST%C2%A0API%7C_____3#CreateanAccessToken). +2. Set the **deployment URL** to your Graylog instance's base URL (e.g., `http://127.0.0.1:9000`). +3. Ensure the user has the **Admin** role in Graylog. + +## Features + +The **Graylog Provider** supports the following key features: + +- **Webhook Setup**: Configures webhooks to send alerts to Keep. +- **Alerts Retrieval**: Fetches and formats alerts from Graylog based on specified search parameters (only a maximum of 10000 most recent alerts) + + +Ensure that the product of `page` and `per_page` does not exceed 10,000. 
+ + + +The notification URL for Graylog v4.x has the api_key as a query param; this is the default behaviour. + + +## Useful Links + +- [Graylog API Documentation](https://go2docs.graylog.org/current/what_is_graylog/what_is_graylog.htm?tocpath=What%20Is%20Graylog%253F%7C_____0) +- [Graylog Access Token](https://go2docs.graylog.org/current/setting_up_graylog/rest_api_access_tokens.htm?tocpath=Set%20up%20Graylog%7CGet%20Started%20with%20Graylog%7CREST%C2%A0API%7C_____3#CreateanAccessToken) +- [Quick Setup for Graylog & Integration with Keep](https://github.com/keephq/keep/blob/main/keep/providers/graylog_provider/README.md) diff --git a/docs/providers/documentation/grok-provider.mdx b/docs/providers/documentation/grok-provider.mdx new file mode 100644 index 0000000000..3ee022e1d9 --- /dev/null +++ b/docs/providers/documentation/grok-provider.mdx @@ -0,0 +1,17 @@ +--- +title: "Grok Provider" +description: "The Grok Provider allows for integrating X.AI's Grok language models into Keep." +--- +import AutoGeneratedSnippet from '/snippets/providers/grok-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +To connect to Grok, you'll need to obtain an API Key: + +1. Subscribe to Grok on X.AI platform. +2. Navigate to the API section in your X.AI account settings. +3. Generate a new API key for Keep. + +Use the generated API key in the `authentication` section of your Grok Provider configuration. 
\ No newline at end of file diff --git a/docs/providers/documentation/http-provider.mdx b/docs/providers/documentation/http-provider.mdx index 38794fef08..d30c85b6b6 100644 --- a/docs/providers/documentation/http-provider.mdx +++ b/docs/providers/documentation/http-provider.mdx @@ -2,24 +2,9 @@ title: "HTTP Provider" description: "HTTP Provider is a provider used to query/notify using HTTP requests" --- +import AutoGeneratedSnippet from '/snippets/providers/http-snippet-autogenerated.mdx'; -## Inputs - -The `query` method of the `HttpProvider` class takes the following inputs: - -- `url`: The URL of the HTTP endpoint to query. -- `method`: The HTTP method to use for the query, either "GET", "POST", "PUT", or "DELETE". -- `headers`: A dictionary of headers to include in the HTTP request. -- `body`: A dictionary of data to include in the HTTP request body, only used for `POST`, `PUT` requests. -- `params`: A dictionary of query parameters to include in the URL of the HTTP request. - -## Outputs - -The `query` method returns the JSON representation of the HTTP response, if the response is JSON-encoded, otherwise it returns the response text as a string. - -## Authentication Parameters - -The `HttpProvider` class does not have any authentication parameters, but the authentication for the HTTP endpoint can be included in the headers or in the URL query parameters. + ## Connecting with the Provider diff --git a/docs/providers/documentation/icinga2-provider.mdx b/docs/providers/documentation/icinga2-provider.mdx new file mode 100644 index 0000000000..7031c2f0fb --- /dev/null +++ b/docs/providers/documentation/icinga2-provider.mdx @@ -0,0 +1,122 @@ +--- +title: "Icinga2 Provider" +sidebarTitle: "Icinga2" +description: "Icinga2 Provider Allows Reception of Push Alerts from Icinga2 to Keep." 
+--- +import AutoGeneratedSnippet from '/snippets/providers/icinga2-snippet-autogenerated.mdx'; + + + +import ProviderLogo from '@components/ProviderLogo'; + + + +# Icinga2 Provider + +The Icinga2 provider allows you to receive alerts from Icinga2 monitoring system within Keep. +Icinga2 provider supports 2 methods for receiving alerts: Webhooks & API Polling. + +The recommended and primary method for receiving alerts is via Webhooks. + +## Setup + +### Prerequisites +1. Access to an Icinga2 instance +2. API user with relevant permissions +3. Keep instance with webhook capability + +### Configuration + +The provider requires the following configuration: + +```yaml +authentication: + host_url: "https://icinga2.example.com" # Your Icinga2 instance URL + api_user: "your-api-user" # Icinga2 API username + api_password: "your-api-password" # Icinga2 API password +``` + +### Webhook Configuration +To configure Icinga2 to send alerts to Keep via webhooks: + +1. Navigate to your Icinga2 configuration directory +2. Create or edit the ```eventcommands.conf``` file +3. 
Add the following event command configuration: + +```plaintext +object EventCommand "keep-notification" { + command = [ "curl" ] + arguments = { + "-X" = "POST" + "-H" = "Content-Type: application/json" + "-H" = "X-API-KEY: ${keep_api_key}" + "--data" = "{ + \"host\": { + \"name\": \"$host.name$\", + \"display_name\": \"$host.display_name$\", + \"check_command\": \"$host.check_command$\", + \"acknowledgement\": \"$host.acknowledgement$\", + \"downtime_depth\": \"$host.downtime_depth$\", + \"flapping\": \"$host.flapping$\" + }, + \"service\": { + \"name\": \"$service.name$\", + \"display_name\": \"$service.display_name$\", + \"check_command\": \"$service.check_command$\", + \"acknowledgement\": \"$service.acknowledgement$\", + \"downtime_depth\": \"$service.downtime_depth$\", + \"flapping\": \"$service.flapping$\" + }, + \"check_result\": { + \"exit_status\": \"$service.state$\", + \"state\": \"$service.state_text$\", + \"output\": \"$service.output$\", + \"execution_start\": \"$service.last_check$\", + \"execution_end\": \"$service.last_check$\", + \"state_type\": \"$service.state_type$\", + \"attempt\": \"$service.check_attempt$\", + \"execution_time\": \"$service.execution_time$\", + \"latency\": \"$service.latency$\" + } + }" + "${keep_webhook_url}" = { + required = true + } + } +} +``` +4. Define variables in your Icinga2 Configuration: + - ```keep_api_key```: Your Keep API key with webhook role + - ```keep_webhook_url```: Your Keep Webhook URL +5. Create a notification rule that uses this event command +6. 
Restart Icinga2 to apply changes + +### State Mapping + +By Default, Icinga2 states are automatically mapped to Keep alert severities & statuses as follows: + + +#### Status Mapping +| Icinga2 State | Keep Status | +|:--------------|:------------| +| OK | RESOLVED | +| WARNING | FIRING | +| CRITICAL | FIRING | +| UNKNOWN | FIRING | +| UP | RESOLVED | +| DOWN | FIRING | + + + + +#### Severity Mapping +| Icinga2 State | Keep Severity | +|:--------------|:--------------| +| OK | INFO | +| WARNING | WARNING | +| CRITICAL | CRITICAL | +| UNKNOWN | INFO | +| UP | INFO | +| DOWN | CRITICAL | + + \ No newline at end of file diff --git a/docs/providers/documentation/ilert-provider.mdx b/docs/providers/documentation/ilert-provider.mdx index 7c1ae601f7..6776d6bee0 100644 --- a/docs/providers/documentation/ilert-provider.mdx +++ b/docs/providers/documentation/ilert-provider.mdx @@ -1,77 +1,29 @@ --- -title: "Ilert Provider" -sidebarTitle: "Ilert Provider" -description: "The ilert provider enables the creation, updating, and resolution of events or incidents on ilert, leveraging both incident management and event notification capabilities for effective incident response." +title: "ilert Provider" +sidebarTitle: "ilert Provider" +description: "The ilert provider facilitates interaction with ilert’s API, allowing for the management of incidents. This includes the ability to create, update, and resolve alerts, as well as send custom event notifications. This provider integrates Keep's system with ilert's AI-first platform for operations teams seeking seamless integration of alerting, on-call management, AI SRE and status pages for faster incident response." --- +import AutoGeneratedSnippet from '/snippets/providers/ilert-snippet-autogenerated.mdx'; ## Overview The ilert provider facilitates interaction with ilert’s API, allowing for the management of incidents and events. 
This includes the ability to create, update, and resolve incidents, as well as send custom event notifications. This provider integrates Keep's system with ilert's robust alerting and incident management platform. -## Inputs - -The `_type` parameter specifies the nature of the notification or action to be taken via the ilert API: - -- `incident`: This type is used for creating or updating incidents. It requires specific information such as incident summary, status, message, and details about affected services. -- `event`: This type allows for sending customized event notifications that can be configured to alert, accept, or resolve specific conditions. It supports details such as event type, summary, details about the event, custom details, and links for more context. - -Depending on the `_type` specified, the provider will route the operation to the appropriate endpoint and handle the data according to ilert's requirements for incidents or events. - -### Incident Management - -- `summary`: A brief summary of the incident. This is required for creating a new incident. -- `status`: `IlertIncidentStatus` - The current status of the incident (e.g., INVESTIGATING, RESOLVED, MONITORING, IDENTIFIED). -- `message`: A detailed message describing the incident or situation. Default is an empty string. -- `affectedServices`: A JSON string representing the list of affected services and their statuses. Default is an empty array (`"[]"`). -- `id`: The ID of the incident to update. If set to `"0"`, a new incident will be created. - -### Event Notification - -- `event_type`: Type of the event to post (`ALERT`, `ACCEPT`, `RESOLVE`). -- `details`: Detailed information about the event. -- `alert_key`: A unique key for the event to allow de-duplication. -- `priority`: Priority level of the event (`HIGH`, `LOW`). -- `images`: List of image URLs to include with the event. -- `links`: List of related links to include with the event. 
-- `custom_details`: Custom key-value pairs to provide additional context. -- `routing_key`: Routing key for the event to direct it to specific recipients. - -## Outputs - -Responses from ilert's API are JSON objects that include the status of the operation and any relevant incident or event details. - -## Authentication Parameters - -- `ilert_token`: API token for authenticating with ilert's API. -- `ilert_host`: API host URL. Default is `https://api.ilert.com/api`. - -## Scopes - -- `read_permission`: Mandatory for validating the API token's read capabilities. -- `write_permission`: Optional for write capabilities, validated upon use. + ## Connecting with the Provider -### API Token +To integrate Keep with ilert, follow these steps: -To obtain an ilert API token: 1. Log in to your ilert account. -2. Navigate to the "API Tokens" section under your user profile or account settings. -3. Generate a new API token. -4. Ensure "Read Permission" and "Write Permission" are enabled. -5. Click "Save". - -### Event Endpoint - -The ilert event endpoint allows the posting of custom events to the ilert system. This is useful for triggering alerts based on specific conditions detected by Keep's systems. The event endpoint can be used to alert, accept, or resolve events through ilert, providing flexibility in how events are handled. - -For more details, refer to the [ilert API Documentation](https://api.ilert.com/api-docs/?#tag/events/post/events). - -## Notes +2. Navigate to "Alert Sources" under your account settings. +3. Create a new alert source specifically for Keep. +4. Note the `ALERT-SOURCE-API-KEY` provided for this alert source. -This provider is part of Keep's integration with ilert, designed to enhance operational resilience by enabling quick and effective incident response. 
+The endpoint to make requests for Keep integration will be: +(https://api.ilert.com/api/v1/events/keep/{ALERT-SOURCE-API-KEY}) ## Useful Links -- [ilert API Documentation](https://api.ilert.com/api-docs/) -- [ilert Alerting](https://www.ilert.com/product/reliable-actionable-alerting) +- [ilert API Documentation](https://api.ilert.com/api-docs/?utm_campaign=Keep&utm_source=integration&utm_medium=organic) +- [ilert Alerting](https://www.ilert.com/product/reliable-actionable-alerting?utm_campaign=Keep&utm_source=integration&utm_medium=organic) diff --git a/docs/providers/documentation/incidentio-provider.mdx b/docs/providers/documentation/incidentio-provider.mdx index bfbccc597f..65f82f9b04 100644 --- a/docs/providers/documentation/incidentio-provider.mdx +++ b/docs/providers/documentation/incidentio-provider.mdx @@ -3,28 +3,13 @@ title: "Incident.io Provider" sidebarTitle: "Incident.io Provider" description: "The Incident.io provider enables the querying of incidents on Incident.io, leveraging incident management capabilities for effective response." --- +import AutoGeneratedSnippet from '/snippets/providers/incidentio-snippet-autogenerated.mdx'; ## Overview The Incident.io provider facilitates interaction with Incident.io's API, allowing for the management of incidents. This includes the ability to query specific incidents, retrieve all incidents, and manage incident details. This provider integrates Keep's system with Incident.io's robust incident management platform. - -### Query Specific Incident - -- `incident_id`: The ID of the incident to be queried. Required for fetching specific incident details. - -## Outputs - -Returns the specific incident with id=`incident_id` - -## Authentication Parameters - -- `incidentIoApiKey`: API key for authenticating with Incident.io's API. - -## Scopes - -- `authenticated`: Mandatory for all operations, ensures the user is authenticated. -- `read_access`: Mandatory for querying incidents, ensures the user has read access. 
+ ## Connecting with the Provider @@ -42,10 +27,6 @@ The Incident.io incident endpoint allows querying and managing incidents. Operat For more details, refer to the [Incident.io API Documentation](https://api-docs.incident.io/). -## Notes - -This provider is part of Keep's integration with Incident.io, designed to enhance operational resilience by enabling efficient incident management and response. - ## Useful Links - [Incident.io API Documentation](https://api-docs.incident.io/) diff --git a/docs/providers/documentation/incidentmanager-provider.mdx b/docs/providers/documentation/incidentmanager-provider.mdx index bdc7eaa8e8..bc1f704a7e 100644 --- a/docs/providers/documentation/incidentmanager-provider.mdx +++ b/docs/providers/documentation/incidentmanager-provider.mdx @@ -2,30 +2,11 @@ title: "Incident Manager Provider" sidebarTitle: "Incident Manager Provider" --- - -# Incident Manager Provider +import AutoGeneratedSnippet from '/snippets/providers/incidentmanager-snippet-autogenerated.mdx'; The Incident Manager Provider allows you to push incidents from AWS IncidentManager to Keep. 
-## Authentication Configuration - -To authenticate with the Incident Manager Provider, you need to provide the following configuration parameters: - -- `access_key`: AWS access key (required, sensitive) -- `access_key_secret`: AWS access key secret (required, sensitive) -- `region`: AWS region (required) -- `response_plan_arn`: AWS Response Plan's ARN (required, hint: Default response plan ARN to use when interacting with incidents, if not provided, we won't be able to register web hook for the incidents) -- `sns_topic_arn`: AWS SNS Topic ARN you want to be used/using in response plan (required, hint: Default SNS topic to use when creating incidents, if not provided, we won't be able to register web hook for the incidents) - -## Provider Scopes - -The Incident Manager Provider requires the following provider scopes: - -- `ssm-incidents:ListIncidentRecords`: Required to retrieve incidents. [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html) (mandatory, alias: Describe Incidents) -- `ssm-incidents:GetResponsePlan`: Required to get response plan and register Keep as webhook. [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html) (optional, alias: Update Response Plan) -- `ssm-incidents:UpdateResponsePlan`: Required to update response plan and register Keep as webhook. [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html) (optional, alias: Update Response Plan) -- `iam:SimulatePrincipalPolicy`: Allow Keep to test the scopes of the current user/role without modifying any resource. [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html) (optional, alias: Simulate IAM Policy) -- `sns:ListSubscriptionsByTopic`: Required to list all subscriptions of a topic, so Keep will be able to add itself as a subscription. 
[Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html) (optional, alias: List Subscriptions) + ## Status Map diff --git a/docs/providers/documentation/jira-on-prem-provider.mdx b/docs/providers/documentation/jira-on-prem-provider.mdx new file mode 100644 index 0000000000..3648b228da --- /dev/null +++ b/docs/providers/documentation/jira-on-prem-provider.mdx @@ -0,0 +1,10 @@ +--- +title: "Jira On-Prem Provider" +sidebarTitle: "Jira On-Prem Provider" +description: "Jira On-Prem Provider is a provider used to query data and creating issues in Jira" +--- +import AutoGeneratedSnippet from '/snippets/providers/jiraonprem-snippet-autogenerated.mdx'; + +This is the on-prem Jira provider documentation; for the regular (cloud) version, please check the [Jira Provider](./jira-provider.mdx). + + \ No newline at end of file diff --git a/docs/providers/documentation/jira-provider.mdx b/docs/providers/documentation/jira-provider.mdx index 89a5d8e5e5..33684c2ec1 100644 --- a/docs/providers/documentation/jira-provider.mdx +++ b/docs/providers/documentation/jira-provider.mdx @@ -1,38 +1,240 @@ --- -title: "Jira Provider" -sidebarTitle: "Jira Provider" -description: "Jira provider is a provider used to query data and creating issues in Jira" +title: "Jira Cloud Provider" +sidebarTitle: "Jira Cloud Provider" +description: "Jira Cloud provider is a provider used to query data and creating issues in Jira" --- +import AutoGeneratedSnippet from '/snippets/providers/jira-snippet-autogenerated.mdx'; -## Inputs + -The `query` function take following parameters as inputs: +## Connecting with the Provider -- `host` (required): Jira host name of the project. -- `board_id` (required): Jira board id. -- `email` (required): Your accout email. +1. Go to https://id.atlassian.com/manage-profile/security/api-tokens to Create API token and generated token should be passed to jira authentication. +2. Get `host` and `board_id` from your respective board from its URL. +3. 
Get `project_key` from your project > settings > details. +4. `email` would be same as of your account email. -The `notify` function take following parameters as inputs: +## Auto-Transition Workflows -- `host` (required): Jira host name of the project. -- `email` (required): Your accout email. -- `project_key` (required): Your jira project key. -- `summary` (required): Incident/issue name or short description. -- `description` (optional): Additional details related to the incident/issue. -- `issue_type` (optional): Issue type name. For example: `Story`, `Bug` etc +The Jira provider supports automatically transitioning tickets when alerts change status. This is useful for keeping your Jira board synchronized with alert states - for example, automatically closing tickets when alerts are resolved. -## Outputs +### Prerequisites -## Authentication Parameters +1. Configure a Jira Cloud provider in Keep +2. Ensure your Jira user has the `TRANSITION_ISSUES` permission +3. Know your Jira board name and desired transition status names -The `query` and `notify` function requires an `api_token` from Jira. +### Workflow 1: Create Jira Ticket on Alert -## Connecting with the Provider +This workflow creates a Jira ticket when an alert fires, but only if no ticket has been created yet. -1. Go to https://id.atlassian.com/manage-profile/security/api-tokens to Create API token and generated token should be passed to jira authentication. -2. Get `host` and `board_id` from your respective board from its URL. -3. Get `project_key` from your project > settings > details. -4. `email` would be same as of your account email. 
+```yaml +workflow: + id: jira-create-ticket-on-alert + name: Create Jira Ticket on Alert + description: Create Jira ticket when alert fires + disabled: false + triggers: + - type: alert + cel: status == "firing" + actions: + - name: jira-action + if: "not '{{ alert.ticket_id }}'" + provider: + type: jira + config: "{{ providers.JiraCloud }}" + with: + board_name: YOUR_BOARD_NAME # Change this to your board name + issue_type: Task # Or Bug, Story, etc. + summary: "{{ alert.name }} - {{ alert.description }}" + description: | + "This ticket was created automatically by Keep. + + Alert Details: + {code:json} + {{ alert }} + {code}" + enrich_alert: + - key: ticket_type + value: jira + - key: ticket_id + value: results.issue.key + - key: ticket_url + value: results.ticket_url +``` + +**Key Points:** +- `if: "not '{{ alert.ticket_id }}'"` - Only creates a ticket if one doesn't exist yet +- `enrich_alert` - Stores the ticket ID, type, and URL in the alert for later use +- The ticket is created in the default status (usually "To Do" or "Open") + +### Workflow 2: Transition Ticket to Done on Alert Resolved + +This workflow updates the existing Jira ticket and transitions it to "Done" when the alert is resolved. + +```yaml +workflow: + id: jira-transition-on-resolved + name: Transition Jira Ticket to Done + description: Close Jira ticket when alert is resolved + disabled: false + triggers: + - type: alert + cel: status == "resolved" + actions: + - name: jira-action + provider: + type: jira + config: "{{ providers.JiraCloud }}" + with: + issue_id: "{{ alert.ticket_id }}" + summary: "{{ alert.name }} - {{ alert.description }} (resolved)" + description: | + "Alert has been resolved automatically by Keep. 
+ + Resolved at: {{ alert.lastReceived }} + + Original Alert Details: + {code:json} + {{ alert }} + {code}" + transition_to: Done # Change to your workflow's status name +``` + +**Key Points:** +- Uses `issue_id: "{{ alert.ticket_id }}"` from the enriched alert data +- `transition_to: Done` - Transitions the ticket to the specified status +- No `if` condition needed - if the alert has no `ticket_id`, the action will simply fail gracefully + +### Available Transition Names + +Common Jira transition names (varies by workflow): +- `Done` +- `Resolved` +- `Closed` +- `In Progress` +- `To Do` +- `Canceled` + +**How to find your transition names:** +1. Go to your Jira project settings +2. Navigate to Workflows +3. Check the available statuses in your workflow +4. Use the exact status name in the `transition_to` parameter (case-insensitive) + +### Error Handling + +If you specify an invalid transition name, the Jira provider will return a helpful error message listing all available transitions for that ticket: + +``` +Transition 'Invalid' not found. Available transitions: To Do, In Progress, Done, Closed +``` + +### Example: Three-State Workflow + +You can also create intermediate transitions: + +```yaml +# Workflow 3: Move to In Progress when acknowledged +workflow: + id: jira-transition-in-progress + name: Transition to In Progress + description: Move ticket to In Progress when alert is acknowledged + disabled: false + triggers: + - type: alert + cel: status == "acknowledged" + actions: + - name: jira-action + provider: + type: jira + config: "{{ providers.JiraCloud }}" + with: + issue_id: "{{ alert.ticket_id }}" + summary: "{{ alert.name }} - In Progress" + description: "Alert acknowledged and being worked on." + transition_to: In Progress +``` + +### Testing + +1. **Create an alert** that triggers the first workflow +- Verify a Jira ticket is created +- Check that the alert has `ticket_id`, `ticket_type`, and `ticket_url` fields + +2. 
**Resolve the alert** to trigger the second workflow +- Verify the existing ticket is updated (no new ticket created) +- Check that the ticket status changed to "Done" + +3. **Check the logs** in Keep UI for any errors or debugging info + +### Troubleshooting + +#### Issue: Workflow creates a new ticket instead of updating + +**Cause:** The `issue_id` parameter is missing or the alert doesn't have a `ticket_id`. + +**Solution:** Ensure the first workflow enriches the alert with `ticket_id` and the second workflow uses it via `issue_id: "{{ alert.ticket_id }}"`. + +#### Issue: Transition fails with "Transition 'X' not found" + +**Cause:** The transition name doesn't match your Jira workflow. + +**Solution:** Check the error message for available transitions and update the `transition_to` parameter accordingly. + +#### Issue: Permission denied when transitioning + +**Cause:** Your Jira user doesn't have the `TRANSITION_ISSUES` permission. + +**Solution:** Grant the necessary permissions in Jira project settings. 
+ +### Advanced Features + +#### Configuration Variables + +You can use Keep's configuration variables to make the workflows more flexible: + +```yaml +consts: + JIRA_BOARD: "ALERTS" + JIRA_DONE_STATUS: "Done" + JIRA_ISSUE_TYPE: "Task" + +# Then use in workflows: +board_name: "{{ consts.JIRA_BOARD }}" +transition_to: "{{ consts.JIRA_DONE_STATUS }}" +issue_type: "{{ consts.JIRA_ISSUE_TYPE }}" +``` + +#### Custom Fields + +You can also set custom fields when creating or updating tickets: + +```yaml +with: + issue_id: "{{ alert.ticket_id }}" + summary: "Alert resolved" + custom_fields: + customfield_10001: "High" + customfield_10002: "Production" + transition_to: Done +``` + +#### Labels and Components + +```yaml +with: + board_name: YOUR_BOARD_NAME + summary: "{{ alert.name }}" + description: "{{ alert.description }}" + labels: + - alert + - automated + - critical + components: + - Monitoring + - Infrastructure +``` ## Notes @@ -41,3 +243,4 @@ The `query` and `notify` function requires an `api_token` from Jira. - https://id.atlassian.com/manage-profile/security/api-tokens - https://developer.atlassian.com/cloud/jira/software/rest/api-group-board/#api-rest-agile-1-0-board-boardid-issue-get - https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issues/#api-rest-api-2-issue-post +- https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issues/#api-rest-api-2-issue-issueidorkey-transitions-get (Transitions API) \ No newline at end of file diff --git a/docs/providers/documentation/kafka-provider.mdx b/docs/providers/documentation/kafka-provider.mdx new file mode 100644 index 0000000000..76bc223a35 --- /dev/null +++ b/docs/providers/documentation/kafka-provider.mdx @@ -0,0 +1,19 @@ +--- +title: "Kafka" +sidebarTitle: "Kafka Provider" +description: "Kafka provider allows integration with Apache Kafka for producing and consuming messages." 
+--- +import AutoGeneratedSnippet from '/snippets/providers/kafka-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +1. Set up a Kafka broker (or use an existing one) and make sure it is accessible. +2. Get the broker URL (e.g., `localhost:9092` or a remote Kafka service URL). +3. (Optional) If using secure communication, provide the security protocol, SASL mechanism, username, and password. +4. Configure the provider with these parameters. + +## Useful Links +- [Kafka Clients Documentation](https://kafka.apache.org/documentation/) + diff --git a/docs/providers/documentation/keep-provider.mdx b/docs/providers/documentation/keep-provider.mdx new file mode 100644 index 0000000000..54edfbc03b --- /dev/null +++ b/docs/providers/documentation/keep-provider.mdx @@ -0,0 +1,18 @@ +--- +title: "Keep" +sidebarTitle: "Keep Provider" +description: "Keep provider allows you to query and manage alerts in Keep." +--- +import AutoGeneratedSnippet from '/snippets/providers/keep-snippet-autogenerated.mdx'; + + + +## Authentication Parameters + +To use the Keep provider, you must authenticate with an API token associated with your Keep account. This token can be generated from your Keep dashboard. + +## Connecting with the Provider + +1. Log in to your Keep account. +2. Navigate to the API section of your account dashboard and generate an API token. +3. Use this token to authenticate when querying alerts via the Keep provider. diff --git a/docs/providers/documentation/kibana-provider.mdx b/docs/providers/documentation/kibana-provider.mdx index f9f0ad399b..b84c2959c5 100644 --- a/docs/providers/documentation/kibana-provider.mdx +++ b/docs/providers/documentation/kibana-provider.mdx @@ -3,6 +3,7 @@ title: "Kibana" sidebarTitle: "Kibana Provider" description: "Kibana provider allows you get alerts from Kibana Alerting via webhooks." 
--- +import AutoGeneratedSnippet from '/snippets/providers/kibana-snippet-autogenerated.mdx'; -## Inputs - -_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ - -## Outputs - -_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ - -## Authentication Parameters - -The `api_key` and `kibana_host` are required for connecting to the Kibana provider. You can obtain them as described in the "Connecting with the Provider" section. -`kibana_port` can be used to override the default Kibana port (9243) + ## Connecting with the Provider @@ -66,27 +56,6 @@ To obtain a Kibana API key, follow these steps: Fingerprints in Kibana are simply the alert instance ID. -## Scopes - -Certain scopes may be required to perform specific actions or queries via the Datadog Provider. Below is a summary of relevant scopes and their use cases: - -- rulesSettings:read (Read alerts) - Required: True - Description: Read alerts. -- rulesSettings:write (Modify Alerts) - Required: True - Description: Modify alerts. -- actions:read (Read connectors) - Required: True - Description: Read connectors. -- actions:write (Write connectors) - Required: True - Description: Write connectors. - -## Notes - -_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ - ## Useful Links - [Kibana Alerting](https://www.elastic.co/guide/en/kibana/current/alerting-getting-started.html) diff --git a/docs/providers/documentation/kubernetes-provider.mdx b/docs/providers/documentation/kubernetes-provider.mdx index 3d6db3d66c..2bcb86aa24 100644 --- a/docs/providers/documentation/kubernetes-provider.mdx +++ b/docs/providers/documentation/kubernetes-provider.mdx @@ -2,26 +2,10 @@ title: "Kubernetes" description: "Kubernetes provider to perform rollout restart or list pods action." 
--- +import AutoGeneratedSnippet from '/snippets/providers/kubernetes-snippet-autogenerated.mdx'; -## Inputs -- **action** (required): Determines the which action to perform (`rollout_restart`, `list_pods`). -- **kind** (required): Kind of the object to perform rollout restart action. -- **object_name** (required): Name of the object to perform rollout restart action. -- **namespace** (required): Namespace of the object to perform rollout restart or list pods action. -- **labels** (optional): Labels to filter the pods while performing list pods action and also filters before performing rollout restart. - -## Outputs - -- **message**: Message for the action performed. - -## Authentication Parameters - -This provider offers you to authenticate with Openshift using: api_server, token and insecure. - -- **api_server** (required): The api server url of your Kubernetes cluster. -- **token** (required): The token of your service account to authenticate with Kubernetes. -- **insecure** (optional): If you want to skip the certificate verification, set this to `True` (default: True). + ## Connecting with the Provider @@ -38,4 +22,3 @@ To connect to Kubernetes, follow below steps: ## Useful Links - [Access Kubernetes Cluster](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/) - diff --git a/docs/providers/documentation/libre_nms-provider.mdx b/docs/providers/documentation/libre_nms-provider.mdx new file mode 100644 index 0000000000..74b2fe9859 --- /dev/null +++ b/docs/providers/documentation/libre_nms-provider.mdx @@ -0,0 +1,137 @@ +--- +title: 'LibreNMS' +sidebarTitle: 'LibreNMS Provider' +description: 'LibreNMS allows you to receive alerts from LibreNMS using API endpoints as well as webhooks' +--- +import AutoGeneratedSnippet from '/snippets/providers/libre_nms-snippet-autogenerated.mdx'; + + + +## Connecting LibreNMS to Keep + +1. Open LibreNMS dashboard and click on settings in the top right corner. + + + + + +2. 
Click on `Create API access token` to generate a new API key. + + + + + +3. Give a description to the API key and click on `Create API Token`. + + + + + +## Webhooks Integration + +1. Open LibreNMS dashboard and open `Alerts` tab in the navigation bar and click on `Alert Transports`. + + + + + +2. Click on `Create add transport` and select `Transport type` as `API`. Select the `API Method` as `POST`. + +3. Fill the `API URL` with [https://api.keephq.dev/alerts/event/libre_nms](https://api.keephq.dev/alerts/event/libre_nms). + + + + + +4. Copy the below JSON and paste it in `body` field. + +```json +{ + "title": "{{ $title }}", + "hostname": "{{ $hostname }}", + "device_id": "{{ $device_id }}", + "sysDescr": "{{ $sysDescr }}", + "sysName": "{{ $sysName }}", + "sysContact": "{{ $sysContact }}", + "os": "{{ $os }}", + "type": "{{ $type }}", + "ip": "{{ $ip }}", + "display": "{{ $display }}", + "version": "{{ $version }}", + "hardware": "{{ $hardware }}", + "features": "{{ $features }}", + "serial": "{{ $serial }}", + "status": "{{ $status }}", + "status_reason": "{{ $status_reason }}", + "location": "{{ $location }}", + "description": "{{ $description }}", + "notes": "{{ $notes }}", + "uptime": "{{ $uptime }}", + "uptime_short": "{{ $uptime_short }}", + "uptime_long": "{{ $uptime_long }}", + "elapsed": "{{ $elapsed }}", + "alerted": "{{ $alerted }}", + "alert_id": "{{ $alert_id }}", + "alert_notes": "{{ $alert_notes }}", + "proc": "{{ $proc }}", + "rule_id": "{{ $rule_id }}", + "id": "{{ $id }}", + "faults": "{{ $faults }}", + "uid": "{{ $uid }}", + "severity": "{{ $severity }}", + "rule": "{{ $rule }}", + "name": "{{ $name }}", + "string": "{{ $string }}", + "timestamp": "{{ $timestamp }}", + "contacts": "{{ $contacts }}", + "state": "{{ $state }}", + "msg": "{{ $msg }}", + "builder": "{{ $builder }}" +} +``` + +5. Follow the below steps to create a new API key in Keep. + +6. 
Go to Keep dashboard and click on the profile icon in the bottom left corner and click `Settings`. + + + + + +7. Select `Users and Access` tab and then select `API Keys` tab and create a new API key. + + + + + +8. Give name and select the role as `webhook` and click on `Create API Key`. + + + + + +9. Copy the API key. + + + + + +10. Add a new header with the key `X-API-KEY`, paste the API key you created in Keep as the value, and save the webhook. + + + + + +11. Save the webhook. + +12. You can add devices from the Devices tab in the LibreNMS dashboard and select the alert transport that you have created. + + + + + +13. Now, you will receive the alerts in Keep. + +## Useful Links + +- [LibreNMS](https://www.librenms.org/) \ No newline at end of file diff --git a/docs/providers/documentation/linear_provider.mdx b/docs/providers/documentation/linear_provider.mdx index 5d5b068390..dbcbe66642 100644 --- a/docs/providers/documentation/linear_provider.mdx +++ b/docs/providers/documentation/linear_provider.mdx @@ -3,20 +3,11 @@ title: "Linear Provider" sidebarTitle: "Linear Provider" description: "Linear Provider is a provider for fetching data and creating issues in Linear app." --- +import AutoGeneratedSnippet from '/snippets/providers/linear-snippet-autogenerated.mdx'; -## Inputs + -- **team_name** (required): The team name associated with the issue. -- **project_name** (required): The project name associated with the issue. -- **title** (required): The title of the incident. -- **description** (optional): Additional details of the incident. -- **priority** (optional): The priority for the incident in linear issue (numeric value within 0 to 4). - -## Outputs - -Linear Provider supports both `query` and `notify` methods. - -## Authentication Parameters +## How to set up

The Linear Provider uses `api_token` for request authorization. 
You need to provider the following: diff --git a/docs/providers/documentation/linearb-provider.mdx b/docs/providers/documentation/linearb-provider.mdx index 0a88dadd8f..65776e4d48 100644 --- a/docs/providers/documentation/linearb-provider.mdx +++ b/docs/providers/documentation/linearb-provider.mdx @@ -3,6 +3,7 @@ title: "LinearB" sidebarTitle: "LinearB Provider" description: "The LinearB provider enables integration with LinearB's API to manage and notify incidents directly through webhooks." --- +import AutoGeneratedSnippet from '/snippets/providers/linearb-snippet-autogenerated.mdx'; -## Inputs - -- `provider_id`: Unique identifier for the provider instance. -- `http_url`: The URL to be associated with the incident for direct access. -- `title`: Title of the incident. -- `teams`: JSON string of teams involved in the incident. -- `respository_urls`: JSON string of repository URLs related to the incident. -- `services`: JSON string of services affected by the incident. -- `started_at`: Incident start time in ISO format. -- `ended_at`: Incident end time in ISO format. -- `git_ref`: Git reference (branch, tag, commit) associated with the incident. - -## Outputs - -- JSON response from LinearB API indicating the success or failure of the operation. - -## Authentication Parameters - -- `api_token`: Required for authenticating with LinearB's API. This token must be kept secure as it allows access to manage incidents. 
+ ## Connecting with the Provider @@ -49,4 +32,4 @@ To use the LinearB provider, you must obtain an API token from LinearB: ### Useful Links -- [LinearB API Reference](https://docs.linearb.io/api-overview/) +- [LinearB API Reference](https://docs.linearb.io/api-overview/) \ No newline at end of file diff --git a/docs/providers/documentation/litellm-provider.mdx b/docs/providers/documentation/litellm-provider.mdx new file mode 100644 index 0000000000..8948e88290 --- /dev/null +++ b/docs/providers/documentation/litellm-provider.mdx @@ -0,0 +1,7 @@ +--- +title: "LiteLLM Provider" +description: "The LiteLLM Provider enables integration with LiteLLM proxy into Keep." +--- +import AutoGeneratedSnippet from '/snippets/providers/litellm-snippet-autogenerated.mdx'; + + \ No newline at end of file diff --git a/docs/providers/documentation/llamacpp-provider.mdx b/docs/providers/documentation/llamacpp-provider.mdx new file mode 100644 index 0000000000..32aba7dee6 --- /dev/null +++ b/docs/providers/documentation/llamacpp-provider.mdx @@ -0,0 +1,45 @@ +--- +title: "Llama.cpp Provider" +description: "The Llama.cpp Provider allows for integrating locally running Llama.cpp models into Keep." +--- +import AutoGeneratedSnippet from '/snippets/providers/llamacpp-snippet-autogenerated.mdx'; + + + The Llama.cpp Provider supports querying local Llama.cpp models for prompt-based + interactions. Make sure you have Llama.cpp server running locally with your desired model. + + +### **Cloud Limitation** +This provider is disabled for cloud environments and can only be used in local or self-hosted environments. + + + +## Connecting with the Provider + +To use the Llama.cpp Provider: + +1. Install Llama.cpp on your system +2. Download or convert your model to GGUF format +3. Start the Llama.cpp server with HTTP interface: + ```bash + ./server --model /path/to/your/model.gguf --host 0.0.0.0 --port 8080 + ``` +4. 
Configure the host URL and model path in your Keep configuration + +## Prerequisites + +- Llama.cpp must be installed and compiled with server support +- A GGUF format model file must be available on your system +- The Llama.cpp server must be running and accessible +- The server must have sufficient resources to load and run your model + +## Model Compatibility + +The provider works with any GGUF format model compatible with Llama.cpp, including: +- LLaMA and LLaMA-2 models +- Mistral models +- OpenLLaMA models +- Vicuna models +- And other compatible model architectures + +Make sure your model is in GGUF format before using it with the provider. \ No newline at end of file diff --git a/docs/providers/documentation/mailchimp-provider.mdx b/docs/providers/documentation/mailchimp-provider.mdx deleted file mode 100644 index 30a106c9ea..0000000000 --- a/docs/providers/documentation/mailchimp-provider.mdx +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "Mailchimp" -sidebarTitle: "Mailchimp Provider" ---- - -# Mailchimp Provider - -MailchimpProvider is a class that implements the Mailchimp API and allows email sending through Keep. - -## Inputs -The `notify` function of `MailchimpProvider` takes the following arguments: - -- `_from` (str): Required. The email address of the sender. -- `to` (str): Required. The email address of the recipient. -- `subject` (str): Required. The subject of the email. -- `html` (str): Required. The HTML body of the email. -- `**kwargs` (optional): Additional optional parameters can be provided as key-value pairs. 
- -See [documentation](https://mailchimp.com/docs/api-reference/emails/send-email) for more - -## Outputs -The `notify` function of `MailchimpProvider` outputs the following format (example): - -```json -{ - "email": "user@example.com", - "status": "sent", - "_id": "8db77476a09d4b47ae1b9bc69d1c74e3", - "reject_reason": null, - "queued_reason": null -} -``` - -See [documentation](https://mailchimp.com/developer/transactional/guides/quick-start/) for more - - -## Authentication Parameters -The Mailchimp provider requires the following authentication parameter: - -- `api_key`: Required. Mailchimp Transactional API key. You can obtain an API key by visiting [Mailchimp API Keys](https://mandrillapp.com//settings). - -## Connecting with the Provider -To connect with the Mailchimp provider and send emails through Keep, follow these steps: - -1. Obtain a Mailchimp Transactional API key: Visit [Mailchimp API Keys](https://mandrillapp.com//settings) to obtain an API key if you don't have one already. -2. Configure the Mailchimp provider in your system with the obtained API key. -3. Use the following YAML example to send an email notification using the Mailchimp provider: - -```yaml title=examples/alert_example.yml -# Send an email notification using the Mailchimp provider. -alert: - id: email-notification - description: Send an email notification using Mailchimp - actions: - - name: send-email - provider: - type: mailchimp - config: "{{ providers.mailchimp-provider }}" - with: - _from: "sender@example.com" - to: "recipient@example.com" - subject: "Hello from Mailchimp Provider" - html: "

This is the email body.

" -``` - -## Useful Links -- [Mailchimp API Keys](https://mailchimp.com/developer/transactional/guides/quick-start/#generate-your-api-key) diff --git a/docs/providers/documentation/mailgun-provider.mdx b/docs/providers/documentation/mailgun-provider.mdx new file mode 100644 index 0000000000..8c3714f404 --- /dev/null +++ b/docs/providers/documentation/mailgun-provider.mdx @@ -0,0 +1,63 @@ +--- +title: "Mailgun Provider" +description: "Mailgun Provider allows sending alerts to Keep via email." +--- +import AutoGeneratedSnippet from '/snippets/providers/mailgun-snippet-autogenerated.mdx'; + + + Mailgun currently supports receiving alerts via email. We will add querying + and notifying soon. + + + + + +## Connecting with the Provider + +To connect to Mailgun, you do not need to perform any actions on the Mailgun side. We use our own Mailgun account and handle everything for you. + +## Post Installation Validation + +You can check that the Mailgun Provider works by sending a test email to the configured email address. + +1. Send a test email to the email address provided in the `authentication` section. +2. Check Keep's platform to see if the alert is received. + + + + + +## Default Alert Values + +When no extraction rules are set, the default values for every alert are as follows: + +- **name**: The subject of the email. +- **source**: The sender of the email. +- **message**: The stripped text content of the email. +- **timestamp**: The timestamp of the email, converted to ISO format. +- **severity**: "info" +- **status**: "firing" + +## How Extraction Works + +Extraction rules allow you to extract specific information from the email content using regular expressions. This can be useful for parsing and structuring the alert data. + + + + + +### Example Extraction Rule + +An extraction rule is defined as a dictionary with the following keys: + +- **key**: The key in the email event to apply the extraction rule to. 
+- **value**: The regular expression to use for extraction. + +#### Example + +Extract the severity from the subject of the email. + +``` +Key: subject +Value: (?P<severity>\w+): +``` diff --git a/docs/providers/documentation/mattermost-provider.mdx b/docs/providers/documentation/mattermost-provider.mdx index 744d6d9fce..22eea2eac7 100644 --- a/docs/providers/documentation/mattermost-provider.mdx +++ b/docs/providers/documentation/mattermost-provider.mdx @@ -3,24 +3,9 @@ title: "Mattermost Provider" sidebarTitle: "Mattermost Provider" description: "Mattermost provider is used to send messages to Mattermost." --- +import AutoGeneratedSnippet from '/snippets/providers/mattermost-snippet-autogenerated.mdx'; -## Inputs - -The `notify` function takes the following parameters as inputs: - -- `message`: Optional. The alert message to send to Mattermost. -- `blocks`: Optional. An array of blocks to format the message content. -- `channel`: Optional. The Mattermost channel to which the message should be sent. - -## Outputs - -N/A - -## Authentication Parameters - -The `MattermostProvider` requires the following authentication parameter: - -- `webhook_url`: Required. Mattermost Webhook URL. + ## Connecting with the Provider diff --git a/docs/providers/documentation/microsoft-planner-provider.mdx b/docs/providers/documentation/microsoft-planner-provider.mdx deleted file mode 100644 index 6eb9031943..0000000000 --- a/docs/providers/documentation/microsoft-planner-provider.mdx +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: "Microsoft Planner Provider" -description: "Microsoft Planner Provider for creating tasks in Planner." ---- - -## Inputs - -- **title** (required): The title of the task to be created. -- **plan_id** (required): The ID of the Planner plan where the task will be created. -- **bucket_id** (optional): The ID of the bucket where the task will be placed. 
- - - -## Authentication Parameters - -The Microsoft Planner Provider uses the following authentication parameters to generate an access token for authentication. You need to provide the following authentication parameters to connect to the Microsoft Planner Provider: - -- **client_id** (required): The client ID of your registered application in Azure. -- **client_secret** (required): The client secret generated for your registered application in Azure. -- **tenant_id** (required): The tenant ID where the authentication app was registered in Azure. - -## Connecting with the Provider - -To connect to Microsoft Planner, follow these steps: - -1. Log in to your [Azure](https://azure.microsoft.com/) account. -2. Register a new application [here](https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/CreateApplicationBlade/isMSAApp~/false). -3. After successfully registering the application, navigate to the **API permissions** page and add the following permissions: - - `Tasks.Read.All` - - `Tasks.ReadWrite.All` -4. Go to the **Overview** page and make note of the `Application (client) ID` and `Directory (tenant) ID`. -5. Visit the **Certificates & secrets** page, create a new client secret, and make note of the client secret value. -6. Add the client ID, client secret, and tenant ID to the `authentication` section in the Microsoft Planner Provider configuration. - -## Notes - -- This provider enables you to interact with Microsoft Planner to create tasks. 
- -## Useful Links - -- [Microsoft Planner](https://learn.microsoft.com/en-us/graph/api/resources/planner-overview?view=graph-rest-1.0) -- [Azure](https://azure.microsoft.com/) - -# diff --git a/docs/providers/documentation/mock-provider.mdx b/docs/providers/documentation/mock-provider.mdx index 7b95f37a29..7336d49419 100644 --- a/docs/providers/documentation/mock-provider.mdx +++ b/docs/providers/documentation/mock-provider.mdx @@ -3,27 +3,6 @@ title: "Mock" sidebarTitle: "Mock Provider" description: "Template Provider is a template for newly added provider's documentation" --- +import AutoGeneratedSnippet from '/snippets/providers/mock-snippet-autogenerated.mdx'; -## Inputs - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Outputs - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Authentication Parameters - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Connecting with the Provider - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Notes - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Useful Links - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ + diff --git a/docs/providers/documentation/monday-provider.mdx b/docs/providers/documentation/monday-provider.mdx new file mode 100644 index 0000000000..882236f77f --- /dev/null +++ b/docs/providers/documentation/monday-provider.mdx @@ -0,0 +1,54 @@ +--- +title: 'Monday' +sidebarTitle: 'Monday Provider' +description: 'Monday Provider allows you to add new pulses to your boards' +--- +import AutoGeneratedSnippet from '/snippets/providers/monday-snippet-autogenerated.mdx'; + +## Overview + +Monday Provider enables seamless integration with Monday.com, a 
work operating system that powers teams to run projects and workflows with confidence. With Monday Provider, you can add new pulses to your boards. + + + +#### Admin tab + +If you are an admin user on your monday.com account, follow these steps to access your API token: + +1. Log into your monday.com account. +2. Click on your avatar/profile picture in the top right corner. +3. Select Administration > Connections > API. +4. Copy your personal token. Please note that you can always regenerate a new token, but doing so will cause any previous tokens to expire. + +#### Developer tab + +If you are a member user or an admin on your monday.com account, follow these steps to access your API token: + +1. Log into your monday.com account. +2. Click on your profile picture in the top right corner. +3. Select Developers. This will open the Developer Center in another tab. +4. Click My Access Tokens > Show. +5. Copy your personal token. Please note that you can always regenerate a new token, but doing so will cause any previous tokens to expire. + +## Connecting Monday to Keep + +1. Obtain the API Token from Monday. +2. Add Monday as a provider in Keep. +3. Give the provider a name and paste the API Token in the `Personal API Token` field and click `Connect`. + +## How to use? + +1. In order to add a new pulse to your board, you need the following information: + - Board ID: The ID of the board where you want to add the pulse. + - Group ID: The ID of the group where you want to add the pulse. + - Item Name: The name of the pulse you want to add. + - Column Values: The values of the columns you want to set for the pulse. +2. Open the board where you want to add the pulse in the monday.com app. +3. Hover over the board name in the side panel and click on the three dots that appear and click on ID to copy the board ID. +4. Hover over the group name in the board and click on the three dots that appear and click on Group ID to copy the group ID. +5. 
Item Name is the name of the pulse you want to add. +6. Column ID and Column Value are the values of the columns you want to set for the pulse. Hover over the column name in the board and click on the three dots that appear and click on Column ID to copy the column ID. The column value is the value you want to set for the column. + +## Useful Links +- [Monday.com](https://monday.com/) +- [Example workflow for Monday Provider](https://github.com/keephq/keep/blob/main/examples/workflows/monday_create_pulse.yml) diff --git a/docs/providers/documentation/mongodb-provider.mdx b/docs/providers/documentation/mongodb-provider.mdx index 6294c93ae5..2f8949a9e3 100644 --- a/docs/providers/documentation/mongodb-provider.mdx +++ b/docs/providers/documentation/mongodb-provider.mdx @@ -3,28 +3,11 @@ title: "MongoDB" sidebarTitle: "MongoDB Provider" description: "MongoDB Provider is a provider used to query MongoDB databases" --- +import AutoGeneratedSnippet from '/snippets/providers/mongodb-snippet-autogenerated.mdx'; -## Inputs -The `query` function of `MongoDBProvider` takes the following arguments: + -- `query` (str): A string containing the query to be executed against the MongoDB database. -- `single_row` (bool, optional): If `True`, the function will return only the first result. - -## Outputs - -The `query` function returns either a `list` or a `tuple` of results, depending on whether `single_row` was set to `True` or not. If `single_row` was `True`, then the function returns a single result. - -## Authentication Parameters - -The following authentication parameters are used to connect to the MongoDB database: - -- `host` (str): The MongoDB connection URI. It can be a full uri with database, authSource, user, pass; or just hostip. -- `username` (str, optional): The MongoDB username. -- `password` (str, optional): The MongoDB password. -- `database` (str, optional): The name of the MongoDB database. 
-- `authSource` (str, optional): The name of the database against which authentication needs to be done. -- `additional_options` (str, optional): Additinal options to be passed to MongoClient as kwargs. ## Connecting with the Provider @@ -44,4 +27,4 @@ In order to connect to the MongoDB database, you can use either a connection URI ## Useful Links -- [MongoDB Documentation](https://docs.mongodb.com/) +- [MongoDB Documentation](https://docs.mongodb.com/) \ No newline at end of file diff --git a/docs/providers/documentation/mysql-provider.mdx b/docs/providers/documentation/mysql-provider.mdx index 3d5edb82bb..70a8e27f67 100644 --- a/docs/providers/documentation/mysql-provider.mdx +++ b/docs/providers/documentation/mysql-provider.mdx @@ -3,26 +3,9 @@ title: "MySQL" sidebarTitle: "MySQL Provider" description: "MySQL Provider is a provider used to query MySQL databases" --- +import AutoGeneratedSnippet from '/snippets/providers/mysql-snippet-autogenerated.mdx'; -## Inputs - -The `query` function of `MysqlProvider` takes the following arguments: - -- `query` (str): A string containing the query to be executed against the MySQL database. -- `single_row` (bool, optional): If `True`, the function will return only the first result. - -## Outputs - -The `query` function returns either a `list` or a `tuple` of results, depending on whether `single_row` was set to `True` or not. If `single_row` was `True`, then the function returns a single result. - -## Authentication Parameters - -The following authentication parameters are used to connect to the MySQL database: - -- `username` (str): The MySQL username. -- `password` (str): The MySQL password. -- `host` (str): The MySQL hostname. -- `database` (str, optional): The name of the MySQL database. 
+ ## Connecting with the Provider diff --git a/docs/providers/documentation/netbox-provider.mdx b/docs/providers/documentation/netbox-provider.mdx new file mode 100644 index 0000000000..c1cbbfd786 --- /dev/null +++ b/docs/providers/documentation/netbox-provider.mdx @@ -0,0 +1,88 @@ +--- +title: 'NetBox' +sidebarTitle: 'NetBox Provider' +description: 'NetBox provider allows you to get events from NetBox through webhook.' +--- +import AutoGeneratedSnippet from '/snippets/providers/netbox-snippet-autogenerated.mdx'; + +## Overview + +NetBox is the leading solution for modeling and documenting modern networks. By combining the traditional disciplines of IP address management (IPAM) and datacenter infrastructure management (DCIM) with powerful APIs and extensions, NetBox provides the ideal "source of truth" to power network automation. Read on to discover why thousands of organizations worldwide put NetBox at the heart of their infrastructure. + +## Connecting NetBox to Keep + +To connect NetBox to Keep, you need to create a webhook in NetBox. + +1. Go to NetBox dashboard, click on `Webhooks` under `Operations` section in the sidebar. + + + + + +2. Add a new webhook by clicking on `Add` button. + + + + + +3. Enter [https://api.keephq.dev/alerts/event/netbox](https://api.keephq.dev/alerts/event/netbox) as the URL and select the request method as `POST`. + +4. Follow the below steps to create a new API key in Keep. + +5. Go to Keep dashboard and click on the profile icon in the bottom left corner and click `Settings`. + + + + + +6. Select `Users and Access` tab and then select `API Keys` tab and create a new API key. + + + + + +7. Give a name and select the role as `webhook` and click on `Create API Key`. + + + + + +8. In the `Additional headers` field enter `X-API-KEY` as the key and the API key generated in step 7 as the value. It should look like below. Refer to the screenshot from step 3. + +``` +X-API-KEY: your-api-key +``` + +9. 
Disable the `SSL verification` (Optional) or enable it based on your requirement. + + + + + +10. Click on `Save` to save the webhook. + +11. Go to `Event Rules` under `Operations` section in the sidebar and click on `Add` button to create a new event rule. + + + + + +12. Fill the required fields based on your requirement. Select the `Object types` and `Event types` for which you want to receive the events. + + + + + +13. In the `Action type` select `Webhook` and select the webhook created in step 3 and click on `Save`. + + + + + +Now, you have successfully connected NetBox to Keep. You will start receiving the events in Keep based on the event rules you have created. + +## Useful Links + +- [NetBox](https://netboxlabs.com/) + + diff --git a/docs/providers/documentation/netdata-provider.mdx b/docs/providers/documentation/netdata-provider.mdx index b013564ecf..a4f68eae08 100644 --- a/docs/providers/documentation/netdata-provider.mdx +++ b/docs/providers/documentation/netdata-provider.mdx @@ -3,28 +3,13 @@ title: "Netdata" sidebarTitle: "Netdata Provider" description: "Netdata provider allows you to get alerts from Netdata via webhooks." --- +import AutoGeneratedSnippet from '/snippets/providers/netdata-snippet-autogenerated.mdx'; ## Overview The Netdata Provider enables seamless integration between Keep and Netdata, allowing alerts from Netdata to be directly sent to Keep through webhook configurations. This integration ensures that critical alerts are efficiently managed and responded to within Keep's platform. -## Connecting Netdata to Keep - -To connect Netdata to Keep, you need to configure it as a webhook from Netdata. Follow the steps below to set up the integration: - -1. In Netdata, go to Space settings. -2. Go to "Alerts & Notifications". -3. Click on "Add configuration". -4. Add "Webhook" as the notification method. -5. Add a name to the configuration. -6. Select Room(s) to apply the configuration. -7. Select Notification(s) to apply the configuration. -8. 
In the "Webhook URL" field, add `https://api.keephq.dev/alerts/event/netdata`. -9. Generate an API key with webhook role from the Keep settings. -10. Add a request header with the key "x-api-key" and API key as the value. -11. Leave the Authentication as "No Authentication". -12. Add the "Challenge secret" as "keep-netdata-webhook-integration". -13. Save the configuration. + ## Useful Links diff --git a/docs/providers/documentation/new-relic-provider.mdx b/docs/providers/documentation/new-relic-provider.mdx index 165f74cb46..701cd08a3c 100644 --- a/docs/providers/documentation/new-relic-provider.mdx +++ b/docs/providers/documentation/new-relic-provider.mdx @@ -3,15 +3,9 @@ title: "New Relic" sidebarTitle: "New Relic Provider" description: "New Relic Provider enables querying AI alerts and registering webhooks." --- +import AutoGeneratedSnippet from '/snippets/providers/newrelic-snippet-autogenerated.mdx'; -## Inputs - -- `account_id` (required): Account id of the new relic account. - -## Authentication Parameters -- `account_id` (required): Account id of the new relic account. -- `api_key` (required): New Relic User key. To receive webhooks, use `User key` of an admin account. -- `api_url` (required): API url to query from NRQL either US or EU based. + ## Connecting with the Provider diff --git a/docs/providers/documentation/ntfy-provider.mdx b/docs/providers/documentation/ntfy-provider.mdx index 994bd41a83..ea126c7fd9 100644 --- a/docs/providers/documentation/ntfy-provider.mdx +++ b/docs/providers/documentation/ntfy-provider.mdx @@ -3,15 +3,9 @@ title: "Ntfy.sh" sidebarTitle: "Ntfy.sh Provider" description: "Ntfy.sh allows you to send notifications to your devices" --- +import AutoGeneratedSnippet from '/snippets/providers/ntfy-snippet-autogenerated.mdx'; -## Authentication Parameters - -The Ntfy.sh provider requires the following authentication parameters: - -- `Ntfy Access Token`: The access token for the Ntfy.sh account. 
This is required for the Ntfy.sh provider. -- `Ntfy Host URL`: (For self-hosted Ntfy) The URL of the self-hosted Ntfy instance in the format `https://ntfy.example.com`. -- `Ntfy Username`: (For self-hosted Ntfy) The username for the self-hosted Ntfy instance. -- `Ntfy Password`: (For self-hosted Ntfy) The password for the self-hosted Ntfy instance. + ## Connecting with the Provider @@ -35,24 +29,6 @@ Subscribing to a Topic (For Ntfy.sh and self-hosted Ntfy) 3. Copy the generated topic name. This will be used as the `Ntfy Subcription Topic` in the provider settings. 4. Reserve the topic and confiure access (Requires ntfy Pro) -## Example of usage -``` -workflow: - id: ntfy-example - description: ntfy-example - triggers: - - type: manual - actions: - - name: ntfy - provider: - type: ntfy - config: "{{ providers.ntfy }}" - with: - message: "test-message" - topic: "test-topic" - -``` - ## Usefull Links - [Ntfy.sh](https://ntfy.sh/) diff --git a/docs/providers/documentation/ollama-provider.mdx b/docs/providers/documentation/ollama-provider.mdx new file mode 100644 index 0000000000..8659c2b900 --- /dev/null +++ b/docs/providers/documentation/ollama-provider.mdx @@ -0,0 +1,30 @@ +--- +title: "Ollama Provider" +description: "The Ollama Provider allows for integrating locally running Ollama language models into Keep." +--- +import AutoGeneratedSnippet from '/snippets/providers/ollama-snippet-autogenerated.mdx'; + + + The Ollama Provider supports querying local Ollama models for prompt-based + interactions. Make sure you have Ollama installed and running locally with your desired models. + + +### **Cloud Limitation** +This provider is disabled for cloud environments and can only be used in local or self-hosted environments. + + + +## Connecting with the Provider + +To use the Ollama Provider: + +1. Install Ollama on your system from [Ollama's website](https://ollama.ai). +2. Start the Ollama service. +3. Pull your desired model(s) using `ollama pull model-name`. +4. 
Configure the host URL in your Keep configuration. + +## Prerequisites + +- Ollama must be installed and running on your system. +- The desired models must be pulled and available in your Ollama installation. +- The Ollama API must be accessible from the host where Keep is running. \ No newline at end of file diff --git a/docs/providers/documentation/openai-provider.mdx b/docs/providers/documentation/openai-provider.mdx new file mode 100644 index 0000000000..952aafb7df --- /dev/null +++ b/docs/providers/documentation/openai-provider.mdx @@ -0,0 +1,23 @@ +--- +title: "OpenAI Provider" +description: "The OpenAI Provider allows for integrating OpenAI's language models into Keep." +--- +import AutoGeneratedSnippet from '/snippets/providers/openai-snippet-autogenerated.mdx'; + + + The OpenAI Provider supports querying GPT language models for prompt-based + interactions. + + + + +## Connecting with the Provider + +To connect to OpenAI, you'll need to obtain an API Key and (optionally) an Organization ID: + +1. Log in to your OpenAI account at [OpenAI Platform](https://platform.openai.com). +2. Go to the **API Keys** section. +3. Click on **Create new secret key** to generate a key for Keep. +4. (Optional) Retrieve your **Organization ID** under **Organization settings** if you’re part of multiple organizations. + +Use the generated API key in the `authentication` section of your OpenAI Provider configuration. 
diff --git a/docs/providers/documentation/openobserve-provider.mdx b/docs/providers/documentation/openobserve-provider.mdx index 610f2947e1..dad85d0f5b 100644 --- a/docs/providers/documentation/openobserve-provider.mdx +++ b/docs/providers/documentation/openobserve-provider.mdx @@ -3,15 +3,9 @@ title: "OpenObserve" sidebarTitle: "OpenObserve Provider" description: "OpenObserve provider allows you to get OpenObserve `alerts/actions` via webhook installation" --- +import AutoGeneratedSnippet from '/snippets/providers/openobserve-snippet-autogenerated.mdx'; -## Authentication Parameters -The OpenObserve provider requires the following authentication parameters: - -- `OpenObserve Username`: Required. This is your OpenObserve account username. -- `OpenObserve Password`: This is the password associated with your OpenObserve Username. -- `OpenObserve Host`: This is the hostname of the OpenObserve instance you wish to connect to. It identifies the OpenObserve server that the API will interact with. -- `OpenObserve Port`: This is the port number for the OpenObserve host, default is 5080. -- `Organisation ID`: The ID of the organisation in which you would like to install the webhook. + ## Connecting with the Provider diff --git a/docs/providers/documentation/opensearchserverless-provider.mdx b/docs/providers/documentation/opensearchserverless-provider.mdx new file mode 100644 index 0000000000..635618081e --- /dev/null +++ b/docs/providers/documentation/opensearchserverless-provider.mdx @@ -0,0 +1,121 @@ +--- +title: "OpenSearch Serverless" +sidebarTitle: "OpenSearchServerless Provider" +description: "OpenSearch Serverless provider enables seamless integration with AWS OpenSearch Serverless for document-level querying, alerting, and writing, directly into Keep." 
+--- +import AutoGeneratedSnippet from '/snippets/providers/opensearchserverless-snippet-autogenerated.mdx'; + +## Overview + +The OpenSearch Provider offers native integration with **Amazon OpenSearch Serverless**, allowing Keep users to query, monitor, and write documents in real-time. This supports observability and event-driven alerting for operational and security use cases. + +### Key Features: + +- **Read & Write Support**: Enables both querying and writing documents to OpenSearch Serverless collections. +- **AWS IAM Authentication**: Authenticates using AWS IAM credentials (access key/secret or instance role). + +## Connecting with the Provider + +To connect OpenSearch with Keep, you’ll need: + +- An AWS account with permissions for OpenSearch Serverless (AOSS). +- A configured collection and index in AOSS. +- AWS IAM credentials (permanent or temporary). + +## Required AWS IAM Permissions (Scopes) + +To function properly, the OpenSearch provider requires the following IAM scopes: + +### Mandatory Scopes + +- **`iam:SimulatePrincipalPolicy`** + - **Description**: Required to check if the IAM identity has access to AOSS API. + - **Alias**: Needed to test the access for next 3 scopes. + - **Mandatory**: Yes + +- **`aoss:APIAccessAll`** + - **Description**: Required to make API calls to OpenSearch Serverless. + - **Alias**: Access to make API calls to serverless + - **Mandatory**: Yes + +- **`aoss:ListAccessPolicies`** + - **Description**: Needed to list all Data Access Policies. + - **Alias**: Policy List access + - **Mandatory**: Yes + +- **`aoss:GetAccessPolicy`** + - **Description**: Required to inspect each policy for read/write scope. + - **Alias**: Policy read access + - **Mandatory**: Yes + +- **`aoss:CreateIndex`** + - **Description**: Required to create an index. 
+ - **Documentation**: [AOSS API Docs](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-genref.html#serverless-operations) + - **Alias**: Create Index + - **Mandatory**: Yes + +- **`aoss:ReadDocument`** + - **Description**: Required to read documents from an OpenSearch collection. + - **Documentation**: [AOSS API Docs](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-genref.html#serverless-operations) + - **Alias**: Read Documents + - **Mandatory**: Yes + +- **`aoss:WriteDocument`** + - **Description**: Required to index or update documents in an OpenSearch collection. + - **Documentation**: [AOSS API Docs](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-genref.html#serverless-operations) + - **Alias**: Write Documents + - **Mandatory**: Yes + + +`iam:SimulatePrincipalPolicy`, `aoss:APIAccessAll`, `aoss:ListAccessPolicies`, `aoss:GetAccessPolicy`, need to be added from your IAM console to the IAM identity used by Keep. +The other two policies are data access policies which need to be added from the AWS Serverless dashboard. +Go through the readme to get step by step setup: [README](https://github.com/keep/keep/providers/opensearchserverless_provider/README.md) + + +## Authentication Configuration + +To authenticate with OpenSearch Serverless, provide the following: + +- **AWS Access Key** (Mandatory): Your AWS access key. +- **AWS Access Key Secret** (Mandatory): Your AWS access key secret. +- **Region** (Mandatory): The AWS region hosting your OpenSearch collection. +- **Domain Endpoint** (Mandatory): The full domain URL of your AOSS collection endpoint. + + +## Setting Up the Integration +### Steps: + +1. **Assign IAM Permissions**: Grant your IAM user/role `aoss:CreateIndex`, `aoss:ReadDocument` and `aoss:WriteDocument` on the target collection. +2. **Configure Keep Provider**: Provide access key, secret, region, and collection endpoint in the Keep platform. 
+ +## Querying OpenSearch + +Keep supports standard OpenSearch queries using the `_search` endpoint: +- **index**: The name of the OpenSearch index to query. +- **query**: A valid OpenSearch query DSL object. + +### Example + +```json +{ + "query": { + "match_all": {} + }, + "size": 1 +} +``` + + +## Writing to OpenSearch + +You can use the `_notify` functionality to push documents into OpenSearch collections. +- **index**: The index name where the document should be written. +- **document**: A Python dictionary representing the document body. +- **id**: ID for the document + + +## Useful Links + +- [AWS OpenSearch Serverless Documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless.html) +- [AOSS Data Access Control](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-data-access.html) +- [README](https://github.com/keep/keep/providers/opensearchserverless_provider\README.md) diff --git a/docs/providers/documentation/openshift-provider.mdx b/docs/providers/documentation/openshift-provider.mdx index f097c97095..162b855cd0 100644 --- a/docs/providers/documentation/openshift-provider.mdx +++ b/docs/providers/documentation/openshift-provider.mdx @@ -2,23 +2,9 @@ title: "Openshift" description: "Openshift provider to perform rollout restart action on specific resources." --- +import AutoGeneratedSnippet from '/snippets/providers/openshift-snippet-autogenerated.mdx'; -## Inputs - -- **kind** (required): Kind of the object which will be run rollout restart action run (`deployments`, `statefulset`, `daemonset`). -- **name** (required): Name of the object which will be run rollout restart action run. - -## Outputs - -- **message**: Message for the action performed. - -## Authentication Parameters - -This provider offers you to authenticate with Openshift using: api_server, token and insecure. - -- **api_server** (required): The api server url of your Openshift cluster. 
-- **token** (required): The token of your user to authenticate with Openshift. -- **insecure** (optional): If you want to skip the certificate verification, set this to `True`. + ## Connecting with the Provider diff --git a/docs/providers/documentation/opsgenie-provider.mdx b/docs/providers/documentation/opsgenie-provider.mdx index 7eb664d7ee..a7ecc3d5c0 100644 --- a/docs/providers/documentation/opsgenie-provider.mdx +++ b/docs/providers/documentation/opsgenie-provider.mdx @@ -2,53 +2,41 @@ title: "Opsgenie Provider" description: "OpsGenie Provider is a provider that allows to create alerts in OpsGenie." --- +import AutoGeneratedSnippet from '/snippets/providers/opsgenie-snippet-autogenerated.mdx'; -## Inputs + -The `notify` function in the `OpsgenieProvider` use OpsGenie [CreateAlertPayload](https://github.com/opsgenie/opsgenie-python-sdk/blob/master/docs/CreateAlertPayload.md): - -### Properties - -| Name | Type | Description | Notes | -| --------------- | ---------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | ---------- | -| **user** | **str** | Display name of the request owner | [optional] | -| **note** | **str** | Additional note that will be added while creating the alert | [optional] | -| **source** | **str** | Source field of the alert. Default value is IP address of the incoming request | [optional] | -| **message** | **str** | Message of the alert | | -| **alias** | **str** | Client-defined identifier of the alert, that is also the key element of alert deduplication. | [optional] | -| **description** | **str** | Description field of the alert that is generally used to provide a detailed information about the alert. 
| [optional] | -| **responders** | **list**[[Recipient](https://github.com/opsgenie/opsgenie-python-sdk/blob/master/docs/Recipient.md)] | Responders that the alert will be routed to send notifications | [optional] | -| **visible_to** | **list**[[Recipient](https://github.com/opsgenie/opsgenie-python-sdk/blob/master/docs/Recipient.md)] | Teams and users that the alert will become visible to without sending any notification | [optional] | -| **actions** | **list[str]** | Custom actions that will be available for the alert | [optional] | -| **tags** | **list[str]** | Tags of the alert | [optional] | -| **details** | **dict(str, str)** | Map of key-value pairs to use as custom properties of the alert | [optional] | -| **entity** | **str** | Entity field of the alert that is generally used to specify which domain alert is related to | [optional] | -| **priority** | **str** | Priority level of the alert | [optional] | +## Connecting with the Provider -## Authentication Parameters +To use the Opsgenie Provider, you'll need to provide the API Key and Integration Name from API Integration. You can create an API integration under Settings -> Integrations -> Add integration and search for API Integration. Select API and provide a name for the integration and click on continue. -The OpsgenieProviderAuthConfig class takes the following parameters: +You can create an integration key under Settings -> Integrations -> Add integration -```python -api_key (str | None): API key, which is a user or team API key. Optional, default is `None`. *Required* -``` + + If you are in the free tier, the integration key can be created under Teams -> + Your team -> Integrations -> Add Integration (API) + -## Connecting with the Provider +Visit the [Opsgenie API Integration](https://app.opsgenie.com/settings/integrations/create/api) for creating an API integration quickly. -To use the Opsgenie Provider, you'll need to provide api_key. 
+ + + -You can create an integration key under Settings -> Integrations -> Add API -Note: if you are in the free tier, the integration key can be created under Teams -> Your team -> Integrations -> Add Integration (API) + + + -## Scopes + + + -Certain scopes may be required to perform specific actions or queries via the Opsgenie Provider. Below is a summary of relevant scopes and their use cases: + + + -- opsgenie:create (Create alerts) - Required: True - Description: It allows to create, close and comment OpsGenie alerts. +Visit the [Opsgenie API Integration](https://support.atlassian.com/opsgenie/docs/create-a-default-api-integration/) documentation for latest information. ## Useful Links - How to create Opsgenie API Integration - https://support.atlassian.com/opsgenie/docs/create-a-default-api-integration/ -- How to get Opsgenie Integration Api Key - https://community.atlassian.com/t5/Opsgenie-questions/OpsGenie-API-Create-alert-Authentication-problem/qaq-p/1531047?utm_source=atlcomm&utm_medium=email&utm_campaign=immediate_general_question&utm_content=topic#U1531256 diff --git a/docs/providers/documentation/pagerduty-provider.mdx b/docs/providers/documentation/pagerduty-provider.mdx index 3461b99562..2296cf5191 100644 --- a/docs/providers/documentation/pagerduty-provider.mdx +++ b/docs/providers/documentation/pagerduty-provider.mdx @@ -1,53 +1,81 @@ --- title: "Pagerduty Provider" -description: "Pagerduty Provider is a provider that allows to create incidents or post events to Pagerduty." +description: "Pagerduty Provider allows integration with PagerDuty to create, manage, and synchronize incidents and alerts within Keep." --- +import AutoGeneratedSnippet from '/snippets/providers/pagerduty-snippet-autogenerated.mdx'; -## Inputs +## Description -- `title`: str: Title of the alert or incident. -- `alert_body`: str: UTF-8 string of custom message for alert. Shown in incident body for events, and in the body for incidents. 
-- `dedup`: str | None: Any string, max 255 characters, used to deduplicate alerts for events. -- `service_id`: str: ID of the service for incidents. -- `body`: dict: Body of the incident. -- `requester`: str: Requester of the incident. -- `incident_key`: str | None: Key to identify the incident. If not given, a UUID will be generated. +The Pagerduty Provider enables integration with PagerDuty to create, manage, and synchronize incidents and alerts within Keep. It supports both direct API key authentication and OAuth2, allowing greater flexibility for secure integration. -## Authentication Parameters + -The `api_key` or `routing_key` are required for connecting to the Pagerduty provider. You can obtain them as described in the "Connecting with the Provider" section. +## Connecting with the Provider -Routing key, which is an integration or ruleset key. API key, which is a user or team API key. +To connect Keep to PagerDuty: -## Connecting with the Provider +- **Routing Key**: Use for event posting via the PagerDuty Events API. In the PagerDuty UI, this is displayed as the integration key. +- **API Key**: Use for incident creation and management through the PagerDuty Incidents API. +- **Service Id** (Optional): If provided, keep operates within the service's scope. +- **OAuth2**: Token management handled automatically by Keep. -To use the PagerdutyProvider, you'll need to provide either a routing_key or an api_key. + + + -You can find your integration key or routing key in the PagerDuty web app under **Configuration** > **Integrations**, and select the integration you want to use. + +You can find your routing key in the PagerDuty (integration key in PagerDuty UI) web app under **Services** > **Service Directory** > **Your service** > **Integrations** > **Expand Events API**, and select the integration you want to use. You can find your API key in the PagerDuty web app under **Configuration** > **API Access**. 
-The routing_key is used to post events to Pagerduty using the events API. +The routing_key is used to post events to PagerDuty using the events API. The api_key is used to create incidents using the incidents API. -## Scopes + + +### Enabling OAuth in the open-source version + +If you would like to use OAuth in the open-source, where you self-host Keep, you can do so by following these step: + +1. Create a PagerDuty account +2. In the account page, go to **Integrations** > **App Registration** + + + +3. Click on **New App** blue button on the top right +4. Fill in the required fields +5. Select "OAuth 2.0" in the Functionality section and click **Next** +6. In the Redirect URL, you need to add Keep's PagerDuty OAuth2 redirect URL, which is based on your deployments URL. For example, if Keep is deployed at http://localhost:3000, the redirect URL is http://localhost:3000/providers/oauth2/pagerduty + + + +7. In the Authorization section, select **Scoped OAuth** and select the following scopes: + +- Abilities: Read Access +- Incidents: Read/Write Access +- Services: Read/Write Access +- Webhook Subscriptions: Read/Write Access -Certain scopes may be required to perform specific actions or queries via the Pagerduty Provider. Below is a summary of relevant scopes and their use cases: +8. Click on **Register App** blue button on the bottom right +9. Copy the **Client ID** and **Client Secret** from the OAuth 2.0 Client Information modal and set the `PAGERDUTY_CLIENT_ID` and `PAGERDUTY_CLIENT_SECRET` environment variables in your Keep backend deployment. + + + -- incidents_read (Incidents Read) - Required: True - Description: View incidents. -- incidents_write (Incidents Write) - Required: False - Description: Write incidents. -- webhook_subscriptions_read (Webhook Subscriptions Read) - Required: False - Description: View webhook subscriptions. 
- (*Required for auto-webhook integration) -- webhook_subscriptions_write (Webhook Subscriptions Write) - Required: False - Description: Write webhook subscriptions. - (*Required for auto-webhook integration) +## PagerDuty Webhook Integration +By default, when Keep installs itself as a webhook integration, it subscribes to all incident events ("Account Scope"). + + + + + +If you wish to limit Keep to some specific services, you can do so by selecting the **Service** scope and selecting the services you want to subscribe to. + + + + + +Find this page under **Integrations** > **Generic Webhooks (v3)** ## Notes @@ -59,6 +87,7 @@ An expired trial while using the free version of PagerDuty may result in the "pa The webhook integration adds Keep as a destination within the "Integrations" API within Pagerduty. This grants Keep access to the following scopes within Pagerduty: + - `webhook_subscriptions_read` - `webhook_subscriptions_write` diff --git a/docs/providers/documentation/pagertree-provider.mdx b/docs/providers/documentation/pagertree-provider.mdx index 59e93b793a..ed18bdec31 100644 --- a/docs/providers/documentation/pagertree-provider.mdx +++ b/docs/providers/documentation/pagertree-provider.mdx @@ -2,32 +2,9 @@ title: "Pagertree Provider" description: "The Pagertree Provider facilitates interactions with the Pagertree API, allowing the retrieval and management of alerts." --- +import AutoGeneratedSnippet from '/snippets/providers/pagertree-snippet-autogenerated.mdx'; -## Inputs - -The `notify` function in the `PagertreeProvider` class takes the following parameters: - -```python -kwargs(dict): - title (str): Title of the alert or incident. *Required* - urgency (Literal["low", "medium", "high", "critical"]): Defines the urgency of the alert. *Required* - incident (bool, default=False): If True, sends data as an incident. 
*Optional* - severities (Literal["SEV-1", "SEV-2", "SEV-3", "SEV-4", "SEV-5", "SEV_UNKNOWN"], default="SEV-5"): Specifies the severity level of the incident. *Optional* - incident_message (str, default=""): Message describing the incident. *Optional* - description (str, default=""): Detailed description of the alert or incident. *Optional* - status (Literal["queued", "open", "acknowledged", "resolved", "dropped"], default="queued"): Status of the alert or incident. *Optional* - destination_team_ids (list[str], default=[]): List of team IDs that the alert or incident will be sent to. *Optional* - destination_router_ids (list[str], default=[]): List of router IDs that the alert or incident will be sent to. *Optional* - destination_account_user_ids (list[str], default=[]): List of account user IDs that the alert or incident will be sent to. *Optional* - **kwargs (dict): Additional keyword arguments that might be needed for future use. *Optional* -``` - - -### Authentication Parameters - -The `PagertreeProviderAuthConfig` class takes the following parameters: -- api_token (str): Your Pagertree API Token. *Required* - + ## Connecting with the Provider diff --git a/docs/providers/documentation/parseable-provider.mdx b/docs/providers/documentation/parseable-provider.mdx new file mode 100644 index 0000000000..d72ee701a3 --- /dev/null +++ b/docs/providers/documentation/parseable-provider.mdx @@ -0,0 +1,16 @@ +--- +title: "Parseable" +sidebarTitle: "Parseable Provider" +description: "Parseable provider allows integration with Parseable, a tool for collecting and querying logs." +--- +import AutoGeneratedSnippet from '/snippets/providers/parseable-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +1. Obtain an API key from your Parseable instance. +2. Configure your provider using the `api_key` and `parseable_url`. 
+
+## Useful Links
+- [Parseable API Documentation](https://www.parseable.com/docs/api)
\ No newline at end of file
diff --git a/docs/providers/documentation/pingdom-provider.mdx b/docs/providers/documentation/pingdom-provider.mdx
new file mode 100644
index 0000000000..ac8a0426c4
--- /dev/null
+++ b/docs/providers/documentation/pingdom-provider.mdx
@@ -0,0 +1,33 @@
+---
+title: "Pingdom"
+sidebarTitle: "Pingdom Provider"
+description: "Pingdom provider allows you to pull alerts from Pingdom or install Keep as webhook."
+---
+import AutoGeneratedSnippet from '/snippets/providers/pingdom-snippet-autogenerated.mdx';
+
+<AutoGeneratedSnippet />
+
+## Connecting with the Provider
+
+### API Key
+
+To obtain the Pingdom API key, follow these steps:
+
+1. Log in to your Pingdom account.
+2. Navigate to the "Settings" section.
+3. Click on the "Pingdom API" tab.
+4. Generate a new API Key.
+
+
+## Fingerprinting
+
+Fingerprints in Pingdom are calculated based on the `check_id` incoming/pulled event.
+
+## Notes
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Useful Links
+
+- [Pingdom Webhook Documentation](https://www.pingdom.com/resources/webhooks)
+- [Pingdom Actions API](https://docs.pingdom.com/api/#tag/Actions)
diff --git a/docs/providers/documentation/pingdom_provider.mdx b/docs/providers/documentation/pingdom_provider.mdx
deleted file mode 100644
index 42c5d036b5..0000000000
--- a/docs/providers/documentation/pingdom_provider.mdx
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: "Pingdom"
-sidebarTitle: "Pingdom Provider"
-description: "Pingdom provider allows you to pull alerts from Pingdom or install Keep as webhook."
----
-
-## Inputs
-
-Pingdom Provider does not currently support the `notify` function.
-
-## Outputs
-
-Pingdom Provider does not currently support the `query` function.
-
-## Authentication Parameters
-
-The `api_key` is required for connecting to the Pingdom provider. 
You can obtain them as described in the "Connecting with the Provider" section. - -## Connecting with the Provider - -### API Key - -To obtain the Pingdom API key, follow these steps: - -1. Log in to your Pingdom account. -2. Navigate to the "Settings" section. -3. Click on the "Pingdom API" tab. -4. Generate a new API Key. - - -## Fingerprinting - -Fingerprints in Pingdom are calculated based on the `check_id` incoming/pulled event. - -## Scopes - -- read (Read) - Required: True - Description: Read data from your Pingdom account. - -## Notes - -_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ - -## Useful Links - -- [Pingdom Webhook Documentation](https://www.pingdom.com/resources/webhooks) -- [Pingdom Actions API](https://docs.pingdom.com/api/#tag/Actions) diff --git a/docs/providers/documentation/planner-provider.mdx b/docs/providers/documentation/planner-provider.mdx index f6a538c109..6bf8541c7f 100644 --- a/docs/providers/documentation/planner-provider.mdx +++ b/docs/providers/documentation/planner-provider.mdx @@ -2,24 +2,9 @@ title: "Microsoft Planner Provider" description: "Microsoft Planner Provider to create task in planner." --- +import AutoGeneratedSnippet from '/snippets/providers/planner-snippet-autogenerated.mdx'; -## Inputs - -- **title** (required): The title of the incident. -- **plan_id** (required): Plan id inside which the task will be created. -- **bucket_id** (optional): Bucket id (unique id of the board inside a plan) inside which the task should be created, if not provided the task will be created in `No bucket` board. - -## Outputs - -Microsoft Planner Provider does not currently support the `query` function. - -## Authentication Parameters - -The Microsoft Planner Provider uses client_id, client_secret and tenant_id to generate access_token for authentication. 
You need to provide the following authentication parameters to connect to Microsoft Planner Provider: - -- **client_id** (required): The client id of your registered application in azure. -- **client_secret** (required): The client secret generated inside your registered application in azure. -- **tenant_id** (required): The tenant id where the authentication app was registered in azure. + ## Connecting with the Provider diff --git a/docs/providers/documentation/postgresql-provider.mdx b/docs/providers/documentation/postgresql-provider.mdx index 5e3236b46d..5c1c2b8a1d 100644 --- a/docs/providers/documentation/postgresql-provider.mdx +++ b/docs/providers/documentation/postgresql-provider.mdx @@ -3,27 +3,9 @@ title: "PostgreSQL" sidebarTitle: "PostgreSQL Provider" description: "PostgreSQL Provider is a provider used to query POSTGRES databases" --- +import AutoGeneratedSnippet from '/snippets/providers/postgres-snippet-autogenerated.mdx'; -## Inputs - -The `query` function of `PsqlProvider` takes the following arguments: - -- `query` (str): A string containing the query to be executed against the POSTGRES database. -- `single_row` (bool, optional): If `True`, the function will return only the first result. - -## Outputs - -The `query` function returns either a `list` or a `tuple` of results, depending on whether `single_row` was set to `True` or not. If `single_row` was `True`, then the function returns a single result. - -## Authentication Parameters - -The following authentication parameters are used to connect to the POSTGRES database: - -- `user` (str): The Postgres username. -- `password` (str): The Postgres password. -- `host` (str): The Postgres hostname. -- `dbname` (str, optional): The name of the Postgres database. -- `port` (str, optional): The Postgres server port. 
+ ## Connecting with the Provider diff --git a/docs/providers/documentation/posthog-provider.mdx b/docs/providers/documentation/posthog-provider.mdx new file mode 100644 index 0000000000..441db1bf5a --- /dev/null +++ b/docs/providers/documentation/posthog-provider.mdx @@ -0,0 +1,125 @@ +--- +title: "PostHog" +sidebarTitle: "PostHog Provider" +description: "PostHog provider allows you to query session recordings and analytics data from PostHog." +--- +import AutoGeneratedSnippet from '/snippets/providers/posthog-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +### API Key + +To obtain the PostHog API key, follow these steps: + +1. Log in to your PostHog account. +2. Navigate to "Project Settings" > "API Keys". +3. Create a new API key or use an existing one. +4. Copy the API key value. + +### Project ID + +To find your PostHog project ID: + +1. Log in to your PostHog account. +2. The project ID is visible in your project settings or in the URL when you're viewing your project. + +## Available Methods + +The PostHog provider offers the following methods: + +### Get Session Recording Domains + +Retrieve a list of domains from session recordings within a specified time period. + +```yaml +- name: get-posthog-domains + provider: + config: "{{ providers.posthog }}" + type: posthog + with: + query_type: session_recording_domains + hours: 24 # Number of hours to look back + limit: 500 # Maximum number of recordings to fetch +``` + +### Get Session Recordings + +Retrieve session recordings data within a specified time period. 
+ +```yaml +- name: get-posthog-recordings + provider: + config: "{{ providers.posthog }}" + type: posthog + with: + query_type: session_recordings + hours: 24 # Number of hours to look back + limit: 100 # Maximum number of recordings to fetch +``` + +## Example Workflow + +Here's an example workflow that tracks domains from PostHog session recordings over the last 24 hours and sends a summary to Slack: + +```yaml +workflow: + id: posthog-domain-tracker + name: PostHog Domain Tracker + description: Tracks domains from PostHog session recordings over the last 24 hours and sends a summary to Slack. + triggers: + - type: manual + - type: interval + value: 86400 # Run daily (in seconds) + steps: + - name: get-posthog-domains + provider: + config: "{{ providers.posthog }}" + type: posthog + with: + query_type: session_recording_domains + hours: 24 + limit: 500 + actions: + - name: send-to-slack + provider: + config: "{{ providers.slack }}" + type: slack + with: + blocks: + - type: header + text: + type: plain_text + text: "PostHog Session Recording Domains (Last 24 Hours)" + emoji: true + - type: section + text: + type: mrkdwn + text: "Found *{{ steps.get-posthog-domains.results.unique_domains_count }}* unique domains across *{{ steps.get-posthog-domains.results.total_domains_found }}* occurrences" + - type: divider + - type: section + text: + type: mrkdwn + text: "Domains:*" + - type: section + text: + type: mrkdwn + text: "{{#steps.get-posthog-domains.results.unique_domains}} + • *{{ . 
}}* + {{/steps.get-posthog-domains.results.unique_domains}}" + - type: divider +``` + +## Notes + +The PostHog provider requires the following scopes: +- `session_recording:read` - Allows reading session recordings data +- `project:read` - Allows reading project data +- `session_recording_playlist:read` - Optional access to recording playlists + +## Useful Links + +- [PostHog API Documentation](https://posthog.com/docs/api/overview) +- [PostHog Session Recordings API](https://posthog.com/docs/api/session-recordings) +- [PostHog Projects API](https://posthog.com/docs/api/projects) diff --git a/docs/providers/documentation/prometheus-provider.mdx b/docs/providers/documentation/prometheus-provider.mdx new file mode 100644 index 0000000000..ac4ddfa3ef --- /dev/null +++ b/docs/providers/documentation/prometheus-provider.mdx @@ -0,0 +1,19 @@ +--- +title: "Prometheus" +sidebarTitle: "Prometheus Provider" +description: "Prometheus provider allows integration with Prometheus for monitoring and alerting purposes." +--- +import AutoGeneratedSnippet from '/snippets/providers/prometheus-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +1. Set up a Prometheus server and make sure it's running. +2. Get the `prometheus_url` where your Prometheus instance is accessible. +3. (Optional) Obtain the API token from your Prometheus configuration if it's protected. +4. Provide these values in the provider configuration. 
+ +## Useful Links +-[Prometheus Querying API Documentation](https://prometheus.io/docs/prometheus/latest/querying/api/) +-[Prometheus Official Documentation](https://prometheus.io/docs/introduction/overview/) \ No newline at end of file diff --git a/docs/providers/documentation/pushover-provider.mdx b/docs/providers/documentation/pushover-provider.mdx index cf552e604c..13c2fc1329 100644 --- a/docs/providers/documentation/pushover-provider.mdx +++ b/docs/providers/documentation/pushover-provider.mdx @@ -3,26 +3,9 @@ title: "Pushover" sidebarTitle: "Pushover Provider" description: "Pushover docs" --- +import AutoGeneratedSnippet from '/snippets/providers/pushover-snippet-autogenerated.mdx'; -## Inputs - -The Pushover provider gets "message" as an input which will be used as the notification message. -Configuration example: - -``` -pushover: - authentication: - token: XXXXXXXXXXXXXXXX - user_key: XXXXXXXXXXXXXXXX -``` - -## Outputs - -None. - -## Authentication Parameters - -The Pushover provider gets two authentication parameters. + Token: ![Token](/images/token.jpeg) diff --git a/docs/providers/documentation/python-provider.mdx b/docs/providers/documentation/python-provider.mdx new file mode 100644 index 0000000000..4356aa0a35 --- /dev/null +++ b/docs/providers/documentation/python-provider.mdx @@ -0,0 +1,17 @@ +--- +title: "Python" +sidebarTitle: "Python Provider" +description: "Python provider allows executing Python code snippets." +--- +import AutoGeneratedSnippet from '/snippets/providers/python-snippet-autogenerated.mdx'; + + + +## Limitations + +- The Python provider is currently disabled for cloud execution. This means that Python scripts cannot be executed in a cloud environment. +- Users must ensure that the scripts are compatible with the local execution environment. 
+ +## Usefull Links + +-[Python Documentation](https://docs.python.org/3/) \ No newline at end of file diff --git a/docs/providers/documentation/quickchart-provider.mdx b/docs/providers/documentation/quickchart-provider.mdx new file mode 100644 index 0000000000..f46868f26a --- /dev/null +++ b/docs/providers/documentation/quickchart-provider.mdx @@ -0,0 +1,56 @@ +--- +title: "QuickChart Provider" +sidebarTitle: "QuickChart Provider" +description: "The QuickChart provider enables the generation of chart images through a simple and open API, allowing visualization of alert trends and counts. It supports both anonymous usage and authenticated access with an API key for enhanced functionality." +--- +import AutoGeneratedSnippet from '/snippets/providers/quickchart-snippet-autogenerated.mdx'; + +# QuickChart Provider + +## Overview + +The QuickChart provider allows for the generation of two types of charts based on alert data within Keep's platform: + +1. A line chart that shows the trend of a specific fingerprint alert over time. +2. A radial gauge chart displaying the total number of alerts Keep received for this fingerprint. + +These charts can be used in various reports, dashboards, or alert summaries to provide visual insights into alert activity and trends. + + + + + + + + + + + +## Connecting with the Provider + +### Using QuickChart without an API Key + +The QuickChart provider can generate charts without the need for an API key. However, this usage is limited to basic functionality and lower request limits. + +### Using QuickChart with an API Key + +To unlock more advanced features and higher usage limits, you can use a QuickChart API key. Here's how to obtain one: + +1. Visit [QuickChart](https://quickchart.io/). +2. Sign up for a free account to get started. +3. Navigate to your account settings to find your API key. + +Once you have your API key, add it to the provider configuration in Keep. 
+ +## Notes + +This provider is designed to offer flexible chart generation capabilities within Keep, enhancing how you visualize alert data and trends. It is ideal for users who want to quickly integrate visual representations of alert activity into their workflows. + +## Useful Links + +- [QuickChart API Documentation](https://quickchart.io/documentation/) +- [QuickChart Website](https://quickchart.io/) diff --git a/docs/providers/documentation/redmine-provider.mdx b/docs/providers/documentation/redmine-provider.mdx index d098ccb0c4..dffca6f91e 100644 --- a/docs/providers/documentation/redmine-provider.mdx +++ b/docs/providers/documentation/redmine-provider.mdx @@ -2,70 +2,13 @@ title: "Redmine" sidebarTitle: "Redmine Provider" --- +import AutoGeneratedSnippet from '/snippets/providers/redmine-snippet-autogenerated.mdx'; # Redmine Provider `RedmineProvider` is a class that integrates with Redmine to manage issue tracking through Keep. -## Inputs -The `_notify` function of `RedmineProvider` takes the following arguments: - -- `project_id` (str): Required. The ID of the Redmine project. -- `subject` (str): Required. The subject of the issue to be created. -- `priority_id` (str): Required. The priority ID for the issue. -- `description` (str): Optional. The description of the issue. -- `**kwargs` (dict): Optional. Additional parameters that can be passed as key-value pairs for the issue. - -## Outputs -The `_notify` function of `RedmineProvider` outputs the following format i.e. 
the created issue (example): - -```json -[ - { - "issue": { - "id": 2, - "project": { - "id": 1, - "name": "KeepHQ" - }, - "tracker": { - "id": 1, - "name": "Bug" - }, - "status": { - "id": 1, - "name": "New", - "is_closed": false - }, - "priority": { - "id": 4, - "name": "Urgent" - }, - "author": { - "id": 1, - "name": "UserName LastName" - }, - "subject": "Issue1", - "description": "A new Issue from KeepHQ", - "start_date": "2024-04-30", - "due_date": null, - "done_ratio": 0, - "is_private": false, - "estimated_hours": null, - "total_estimated_hours": null, - "created_on": "2024-04-30T11:59:17Z", - "updated_on": "2024-04-30T11:59:17Z", - "closed_on": null - } - } -] -``` - -## Authentication Parameters -The Redmine provider requires the following authentication parameters: - -- `host` (str): Required. The host URL of the Redmine server. -- `api_access_key` (str): Required. Redmine API Access Key. Refer to the [Redmine REST API documentation](https://www.redmine.org/projects/redmine/wiki/rest_api#Authentication) for details on obtaining an API key. + ## Connecting with the Provider To connect with the Redmine provider and manage issues through Keep, follow these steps: diff --git a/docs/providers/documentation/resend-provider.mdx b/docs/providers/documentation/resend-provider.mdx index 0996887ff3..bb08a3fcb9 100644 --- a/docs/providers/documentation/resend-provider.mdx +++ b/docs/providers/documentation/resend-provider.mdx @@ -2,41 +2,13 @@ title: "Resend" sidebarTitle: "Resend Provider" --- +import AutoGeneratedSnippet from '/snippets/providers/resend-snippet-autogenerated.mdx'; # Resend Provider ResendProvider is a class that implements the Resend API and allows email sending through Keep. -## Inputs -The `notify` function of `ResendProvider` takes the following arguments: - -- `_from` (str): Required. The email address of the sender. -- `to` (str): Required. The email address of the recipient. -- `subject` (str): Required. The subject of the email. 
-- `html` (str): Required. The HTML body of the email. -- `**kwargs` (optional): Additional optional parameters can be provided as key-value pairs. - -See [documentation](https://resend.com/docs/api-reference/emails/send-email) for more - -## Outputs -The `notify` function of `ResendProvider` outputs the following format (example): - -```json -{ - "id": "49a3999c-0ce1-4ea6-ab68-afcd6dc2e794", - "from": "onboarding@resend.dev", - "to": "user@example.com", - "created_at": "2022-07-25T00:28:32.493138+00:00" -} -``` - -See [documentation](https://resend.com/docs/api-reference/emails/send-email) for more - - -## Authentication Parameters -The Resend provider requires the following authentication parameter: - -- `api_key`: Required. Resend API key. You can obtain an API key by visiting [Resend API Keys](https://resend.com/api-keys). + ## Connecting with the Provider To connect with the Resend provider and send emails through Keep, follow these steps: diff --git a/docs/providers/documentation/rollbar-provider.mdx b/docs/providers/documentation/rollbar-provider.mdx index 521a3b98f2..964d4bfca6 100644 --- a/docs/providers/documentation/rollbar-provider.mdx +++ b/docs/providers/documentation/rollbar-provider.mdx @@ -3,12 +3,9 @@ title: "Rollbar" sidebarTitle: "Rollbar Provider" description: "Rollbar provides real-time error tracking and debugging tools for developers." --- +import AutoGeneratedSnippet from '/snippets/providers/rollbar-snippet-autogenerated.mdx'; -## Authentication Parameters - -The Rollbar provider requires the following authentication parameters: - -- `rollbarAccessToken` - Project Access Token is used to authenticate the Rollbar API requests. 
+<AutoGeneratedSnippet />
 
 ## Connecting with the Provider
@@ -24,4 +21,4 @@ You can manage the permissions granted by the webhook integration by navigating
 
 ## Usefull Links
 
-- [Rollbar](https://rollbar.com/)
+- [Rollbar](https://rollbar.com/)
\ No newline at end of file
diff --git a/docs/providers/documentation/s3-provider.mdx b/docs/providers/documentation/s3-provider.mdx
new file mode 100644
index 0000000000..de6357e159
--- /dev/null
+++ b/docs/providers/documentation/s3-provider.mdx
@@ -0,0 +1,34 @@
+---
+title: "AWS S3"
+sidebarTitle: "AWS S3 Provider"
+description: "AWS S3 provider to query S3 buckets"
+---
+import AutoGeneratedSnippet from '/snippets/providers/s3-snippet-autogenerated.mdx';
+
+<AutoGeneratedSnippet />
+
+## Limitations
+
+Querying only yaml, yml, json, xml and csv files.
+
+## Scopes
+
+Please note that during the installation, the provider is performing `list_buckets` to validate the config. Here is an example IAM policy:
+```
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "VisualEditor0",
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket",
+        "s3:GetObject",
+        "s3:GetBucketLocation",
+        "s3:ListAllMyBuckets"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
+```
\ No newline at end of file
diff --git a/docs/providers/documentation/sendgrid-provider.mdx b/docs/providers/documentation/sendgrid-provider.mdx
index 45b6c94250..2ebdea2265 100644
--- a/docs/providers/documentation/sendgrid-provider.mdx
+++ b/docs/providers/documentation/sendgrid-provider.mdx
@@ -2,39 +2,13 @@
 title: "SendGrid"
 sidebarTitle: "SendGrid Provider"
 ---
+import AutoGeneratedSnippet from '/snippets/providers/sendgrid-snippet-autogenerated.mdx';
 
 # SendGrid Provider
 
 SendGridProvider is a class that implements the SendGrid API and allows email sending through Keep.
 
-## Inputs
-The `notify` function of `SendGridProvider` takes the following arguments:
-
-- `to` (str): Required. The email address of the recipient.
-- `subject` (str): Required. The subject of the email.
-- `html` (str): Required. The HTML body of the email. 
-- `**kwargs` (optional): Additional optional parameters can be provided as key-value pairs. - -See [documentation](https://www.twilio.com/docs/sendgrid/api-reference) for more details. - -## Outputs -The `notify` function of `SendGridProvider` outputs the following format (example): -``` -{ - "status_code": 202, - "body": "", - "headers": { - "X-Message-Id": "G9RvW0ONQ0uK7eRfhHfZTQ" - } -} -``` -See [documentation](https://www.twilio.com/docs/sendgrid/api-reference) for more details. - -## Authentication Parameters -The SendGrid provider requires the following authentication parameters: - -- `api_key`: Required. SendGrid API key. You can obtain an API key by visiting [SendGrid API Keys](https://www.twilio.com/docs/sendgrid/api-reference/api-keys). -- `from_email`: Required. The email address from which the email is sent. + ## Connecting with the Provider To connect with the SendGrid provider and send emails through Keep, follow these steps: @@ -43,23 +17,6 @@ To connect with the SendGrid provider and send emails through Keep, follow these 2. Configure the SendGrid provider in your system with the obtained API key and the `from_email` address. 3. Use the following YAML example to send an email notification using the SendGrid provider: -``` -title=examples/alert_example.yml -# Send an email notification using the SendGrid provider. -alert: - id: email-notification - description: Send an email notification using SendGrid - actions: - - name: send-email - provider: - type: sendgrid - config: "{{ providers.sendgrid-provider }}" - with: - to: "recipient@example.com" - subject: "Hello from SendGrid Provider" - html: "

This is the email body.

" -``` - ## Useful Links - [SendGrid API Keys](https://sendgrid.com/docs/ui/account-and-settings/api-keys/) - [SendGrid API Reference](https://www.twilio.com/docs/sendgrid/api-reference) diff --git a/docs/providers/documentation/sentry-provider.mdx b/docs/providers/documentation/sentry-provider.mdx index cf1f80fcd5..29eff59b99 100644 --- a/docs/providers/documentation/sentry-provider.mdx +++ b/docs/providers/documentation/sentry-provider.mdx @@ -4,23 +4,17 @@ sidebarTitle: "Sentry Provider" description: "Sentry provider allows you to query Sentry events and to pull/push alerts from Sentry" --- -## Inputs +import AutoGeneratedSnippet from "/snippets/providers/sentry-snippet-autogenerated.mdx"; -- `time: str = "14d"`: The time range for the query (e.g., `1d`) -- `project: str`: The project to query on. + -## Authentication Parameters - -The `api_key` and `organization_slug` are required for connecting to the Sentry provider. You can obtain them as described in the "Connecting with the Provider" section. - -`project_slug` is if you want to connect Sentry to a specific project within an organization. +## Connecting with the Provider -To connect self hosted Sentry, you need to set the `api_url` parameter. Default value is `https://sentry.io/api/0/`. + To connect self hosted Sentry, you need to set the `api_url` parameter. + Default value is `https://sentry.io/api/0/`. -## Connecting with the Provider - ### API Key To obtain the Sentry API key, follow these steps ([Docs](https://docs.sentry.io/product/integrations/integration-platform/?original_referrer=https%3A%2F%2Fwww.google.com%2F#internal-integrations)): @@ -29,12 +23,41 @@ To obtain the Sentry API key, follow these steps ([Docs](https://docs.sentry.io/ 2. Navigate `Settings` -> `Developer Settings` section. 3. Click on `Custom integrations`. 4. Click on `Create New Integration` on the top right side of the screen. + + + + + 5. Select `Internal Integration` and click `Next` + + + + + 6. 
Give the integration an indicative name, e.g. `Keep Integration` -7. From the permission section, select the required scopes as defined at the bottom of this page. +7. From the permission section, select the required scopes: + +Project: Read & Write +Issue & Event: Read +Organization: Read +Alerts: Read & Write (Not Mandatory) + + + + + 8. Click `Save Changes` + + + + + 9. Scroll down to the bottom of the screen to the `TOKENS` section and copy the generated token -- This is the API key you will be using in Keep. + + + + ### Organization Slug You can find the Organization Slug in your Sentry URL. @@ -46,20 +69,6 @@ To obtain the Organization Slug from the settings page: 2. Navigate `Settings` -> `General Settings`. 3. Copy the Organization Slug from the Organization Slug input. -## Scopes - -Certain scopes may be required to perform specific actions or queries via the Sentry Provider. Below is a summary of relevant scopes and their use cases: - -- `event:read` - | Required: `True` - | Description: `Read events and issues.` -- `project:read` - | Required: `True` - | Description: `Read projects in organization` -- `project:write` - | Required: `False` - | Description: `Write permission for projects in an organization.` (\*_Required for auto-webhook integration_) - ## Notes diff --git a/docs/providers/documentation/service-now-provider.mdx b/docs/providers/documentation/service-now-provider.mdx new file mode 100644 index 0000000000..986fc183e5 --- /dev/null +++ b/docs/providers/documentation/service-now-provider.mdx @@ -0,0 +1,20 @@ +--- +title: "Service Now" +sidebarTitle: "Service Now Provider" +description: "Service Now provider allows sending notifications, updates, and retrieving topology information from the ServiceNow CMDB." +--- +import AutoGeneratedSnippet from '/snippets/providers/servicenow-snippet-autogenerated.mdx'; + + + +## Connecting with the Provider + +1. Ensure that the ServiceNow instance is accessible via API. +2. 
Provide the necessary API credentials (`instance_url` and `api_token`) in the provider configuration. + +## Additional + +- `KEEP_SERVICENOW_PROVIDER_SKIP_SCOPE_VALIDATION` environment variable in the backend allows bypassing scope validation. + +## Useful Links +- [Service Now API documentation](https://docs.servicenow.com/bundle/xanadu-api-reference/page/build/applications/concept/api-rest.html) \ No newline at end of file diff --git a/docs/providers/documentation/signalfx-provider.mdx b/docs/providers/documentation/signalfx-provider.mdx index 3dd3164a6c..2d1f0976a7 100644 --- a/docs/providers/documentation/signalfx-provider.mdx +++ b/docs/providers/documentation/signalfx-provider.mdx @@ -3,6 +3,7 @@ title: "SignalFX" sidebarTitle: "SignalFX Provider" description: "SignalFX provider allows you get alerts from SignalFX Alerting via webhooks." --- +import AutoGeneratedSnippet from '/snippets/providers/signalfx-snippet-autogenerated.mdx'; ## Overview SignalFX Provider enriches your monitoring and alerting capabilities by seamlessly integrating with SignalFX Alerting via webhooks. This integration allows you to receive alerts directly from SignalFX, ensuring you're promptly informed about significant events and metrics within your infrastructure. @@ -151,6 +152,7 @@ Fingerprints in SignalFx calculated based on (incidentId, detectorId). The automatic webhook integration gains access to the `API` authScope, which gives Keep the ability to read and write to the SignalFx API. + ## Useful Links diff --git a/docs/providers/documentation/signl4-provider.mdx b/docs/providers/documentation/signl4-provider.mdx index 099bed1ea6..7399e14575 100644 --- a/docs/providers/documentation/signl4-provider.mdx +++ b/docs/providers/documentation/signl4-provider.mdx @@ -2,31 +2,9 @@ title: "SIGNL4 Provider" description: "SIGNL4 offers critical alerting, incident response and service dispatching for operating critical infrastructure.
It alerts you persistently via app push, SMS text and voice calls including tracking, escalation, collaboration and duty planning. Find out more at [signl4.com](https://www.signl4.com/)" --- +import AutoGeneratedSnippet from '/snippets/providers/signl4-snippet-autogenerated.mdx'; -## Inputs - -The `notify` function in the `Signl4Provider` class takes the following parameters: - -```python -kwargs (dict): - title (str): Title of the SIGNL4 alert. *Required* - message (str): Alert message. - user (str): User, e.g. the requester of the incident. - s4_external_id (str): If the event originates from a record in a 3rd party system, use this parameter to pass the unique ID of that record. That ID will be communicated in outbound webhook notifications from SIGNL4, which is great for correlation / synchronization of that record with the alert. - s4_status (str): If you want to resolve an existing alert by an external id (s4_external_id), you can add this status parameter. It has three possible values. new: Default value which means that this event triggers a new alert. acknowledged: If you want to acknowledge a previously triggered alert (e.g. someone responded in the 3rd party system and not in the mobile app during business hours), set the s4_status to 'acknowledged' and provide an external ID via the s4_external_id parameter for the alert you want to acknowledge. It is only possible to acknowledge a Signl with a provided external id that initially triggered it. resolved: If you want to resolve a previously triggered alert (e.g. monitoring system has auto-closed the event), make sure to set the s4_status to 'resolved' and provide an external ID via the s4_external_id parameter for the alerts(s) you want to resolve. It is only possible to resolve a Signl with a provided external id that initially triggered it. - s4_service (str): Assigns the alert to the service / system category with the specified name. 
- s4_location (str): Transmit location information ('latitude, longitude') with your event and display a map in the mobile app. - s4_alerting_scenario (str): If this event triggers a Signl, allows to control how SIGNL4 notifies the team. single_ack: Only one person needs to acknowledge this alert. multi_ack: The Signl must be confirmed by the number of people who are on duty at the time this alert is created. emergency: All people in the team are notified regardless of their duty status and must acknowledge the Signl, which is also assigned to the built-in emergency category. - s4_filtering (bool): Specify a boolean value of true or false to apply event filtering for this event, or not. If set to true, the event will only trigger a notification to the team, if it contains at least one keyword from one of your services and system categories (i.e. it is whitelisted). -``` - -You can find more information [here](https://connect.signl4.com/webhook/docs/index.html). - -## Authentication Parameters - -The Signl4ProviderAuthConfig class takes the following parameters: -python -signl4_integration_secret (str): Your SIGNL4 integration or team secret. + ## Connecting with the Provider diff --git a/docs/providers/documentation/site24x7-provider.mdx b/docs/providers/documentation/site24x7-provider.mdx index affc91a17d..59a6417e77 100644 --- a/docs/providers/documentation/site24x7-provider.mdx +++ b/docs/providers/documentation/site24x7-provider.mdx @@ -2,10 +2,9 @@ title: "Site24x7 Provider" description: "The Site24x7 Provider allows you to install webhooks and receive alerts in Site24x7. It manages authentication, setup of webhooks, and retrieval of alert logs from Site24x7." --- +import AutoGeneratedSnippet from '/snippets/providers/site24x7-snippet-autogenerated.mdx'; -## Inputs - -The `Site24x7Provider` class handles authentication and interacts with the Site24x7 API to install webhooks and fetch alerts. 
Here are the primary methods and their parameters: + ### Main Class Methods @@ -18,15 +17,6 @@ The `Site24x7Provider` class handles authentication and interacts with the Site2 - **`_get_alerts()`** - Returns a list of `AlertDto` objects representing the alerts. -### Authentication Parameters - -The `Site24x7ProviderAuthConfig` class is used for API authentication and includes: - -- **`zohoRefreshToken (str)`**: Refresh token for Zoho authentication. *Required* -- **`zohoClientId (str)`**: Client ID for Zoho authentication. *Required* -- **`zohoClientSecret (str)`**: Client Secret for Zoho authentication. *Required* -- **`zohoAccountTLD (str)`**: Top-Level Domain for the Zoho account. Options include `.com`, `.eu`, `.com.cn`, `.in`, `.com.au`, `.jp`. *Required* - ## Connecting with the Provider To use the Site24x7 Provider, initialize it with the necessary authentication credentials and provider configuration. Ensure that your Zoho account credentials (Client ID, Client Secret, and Refresh Token) are correctly set up in the `Site24x7ProviderAuthConfig`. @@ -75,9 +65,11 @@ To use the Site24x7 Provider, initialize it with the necessary authentication cr --- ## Notes +- You must use your domain-specific Zoho Accounts URL to generate refresh tokens, otherwise you will receive an `invalid_client` error. See [Data center for Zoho Account](https://help.zoho.com/portal/en/kb/accounts/manage-your-zoho-account/articles/data-center-for-zoho-account). - Ensure that the necessary scopes **Site24x7.Admin.Read, Site24x7.Admin.Create, Site24x7.Operations.Read** are included when generating the grant token, as they dictate the API functionalities accessible via the provider. - Zoho API Console [Link](https://api-console.zoho.com) + ## Webhook Integration Modifications The webhook integration grants Keep access to the following scopes within Site24x7: @@ -94,3 +86,4 @@ The webhook can be accessed via the "Alarms" section in the Site24x7 console. 
- [Zoho OAuth Documentation](https://www.zoho.com/accounts/protocol/oauth/web-apps.html) - [Site 24x7 Authentication Guide](https://www.site24x7.com/help/api/#authentication) - [Third Party and Webhook Integrations](https://www.site24x7.com/help/api/#third-party-integrations) +- [List of Zoho Account datacenters](https://help.zoho.com/portal/en/kb/accounts/manage-your-zoho-account/articles/data-center-for-zoho-account) diff --git a/docs/providers/documentation/slack-provider.mdx b/docs/providers/documentation/slack-provider.mdx index 2eadada1e5..b5ae156d53 100644 --- a/docs/providers/documentation/slack-provider.mdx +++ b/docs/providers/documentation/slack-provider.mdx @@ -3,6 +3,7 @@ title: "Keep's integration for Slack" sidebarTitle: "Integration for Slack" description: "Enhance your Keep workflows with direct Slack notifications. Simplify communication with timely updates and alerts directly within Slack." --- +import AutoGeneratedSnippet from '/snippets/providers/slack-snippet-autogenerated.mdx'; ## Overview @@ -13,6 +14,9 @@ Keep's integration for Slack enables seamless communication by allowing you to s - **Direct Notifications**: Utilize Keep to send messages directly to your Slack channels. - **Flexible Configuration**: Easily configure alerts based on specific triggers within your Keep workflows. - **Interactive Messages**: Enhance your Slack messages with interactive components like buttons and inputs. +- **Editable Messages**: Update existing Slack messages dynamically based on changes in alert status or other workflow outcomes, ensuring that your notifications reflect the most current information. + + ## Getting Started @@ -55,15 +59,6 @@ With Keep's integration for Slack installed, you're ready to enhance your workfl 2. **Send a Test Notification**: Ensure your setup is correct by sending a test notification through your configured workflow, use the "Run Manually" link for that.. 
-### Inputs - -The `notify` function take following parameters as inputs: - -- `message`: Required. Message text to send to Slack -- `blocks`: Optional. Array of interactive components like inputs, buttons -- `channel`: Optional. The channel ID to send to if using the OAuth integration. - - ## Useful Links - [Slack API Documentation](https://api.slack.com/messaging/webhooks) diff --git a/docs/providers/documentation/smtp-provider.mdx b/docs/providers/documentation/smtp-provider.mdx new file mode 100644 index 0000000000..7295959886 --- /dev/null +++ b/docs/providers/documentation/smtp-provider.mdx @@ -0,0 +1,59 @@ +--- +title: 'SMTP' +sidebarTitle: 'SMTP Provider' +description: 'SMTP Provider allows you to send emails.' +--- +import AutoGeneratedSnippet from '/snippets/providers/smtp-snippet-autogenerated.mdx'; + +## Overview + +SMTP Provider allows you to send emails from Keep. Most of the email services like Gmail, Yahoo, Mailgun, etc. provide SMTP servers to send emails. You can use these SMTP servers to send emails from Keep. + +The SMTP provider supports both plain text and HTML-formatted emails, allowing you to create rich, styled email notifications. + + + +## Connecting with SMTP Provider + +1. Obtain the SMTP credentials from your email service provider. Example: Gmail, Yahoo, Mailgun, etc. +2. Add SMTP Provider in Keep with the obtained credentials. +3. Connect the SMTP Provider with Keep. + +## Email Format Support + +The SMTP provider supports two email formats: + +### Plain Text Emails +Use the `body` parameter to send plain text emails: +```yaml +with: + from_email: "sender@example.com" + from_name: "Keep Alerts" + to_email: "recipient@example.com" + subject: "Alert Notification" + body: "This is a plain text email notification." 
+``` + +### HTML Emails +Use the `html` parameter to send HTML-formatted emails: +```yaml +with: + from_email: "sender@example.com" + from_name: "Keep Alerts" + to_email: "recipient@example.com" + subject: "Alert Notification" + html: "

<html><body>
<h1>Alert</h1>
<p>This is an HTML email notification.</p>
</body></html>

" +``` + +When both `body` and `html` are provided, the HTML content takes precedence. + +## Multiple Recipients + +You can send emails to multiple recipients by providing a list of email addresses: +```yaml +with: + to_email: + - "recipient1@example.com" + - "recipient2@example.com" + - "recipient3@example.com" +``` diff --git a/docs/providers/documentation/snowflake-provider.mdx b/docs/providers/documentation/snowflake-provider.mdx index 1a61d79379..19916a03dc 100644 --- a/docs/providers/documentation/snowflake-provider.mdx +++ b/docs/providers/documentation/snowflake-provider.mdx @@ -3,27 +3,6 @@ title: "Snowflake" sidebarTitle: "Snowflake Provider" description: "Template Provider is a template for newly added provider's documentation" --- +import AutoGeneratedSnippet from '/snippets/providers/snowflake-snippet-autogenerated.mdx'; -## Inputs - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Outputs - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Authentication Parameters - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Connecting with the Provider - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Notes - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ - -## Useful Links - -_No information yet, feel free to contribute it using the "Edit this page" link the buttom of the page_ + diff --git a/docs/providers/documentation/splunk-provider.mdx b/docs/providers/documentation/splunk-provider.mdx index 88956cd8be..042f5cb010 100644 --- a/docs/providers/documentation/splunk-provider.mdx +++ b/docs/providers/documentation/splunk-provider.mdx @@ -3,14 +3,9 @@ title: "Splunk" sidebarTitle: "Splunk Provider" description: "Splunk provider allows you to get Splunk `saved 
searches` via webhook installation" --- +import AutoGeneratedSnippet from '/snippets/providers/splunk-snippet-autogenerated.mdx'; -## Authentication Parameters -The Splunk provider requires the following authentication parameter: - -- `Splunk UseAPI Key`: Required. This is your Splunk account username, which you use to log in to the Splunk platform. -- `Host`: This is the hostname or IP address of the Splunk instance you wish to connect to. It identifies the Splunk server that the API will interact with. -- `Port`: This is the network port on the Splunk server that is listening for API connections. The default port for Splunk's management API is typically 8089. -- `` + ## Connecting with the Provider diff --git a/docs/providers/documentation/squadcast-provider.mdx b/docs/providers/documentation/squadcast-provider.mdx index d7154ad473..1efda9b747 100644 --- a/docs/providers/documentation/squadcast-provider.mdx +++ b/docs/providers/documentation/squadcast-provider.mdx @@ -3,6 +3,9 @@ title: "Squadcast Provider" sidebarTitle: "Squadcast Provider" description: "Squadcast provider is a provider used for creating issues in Squadcast" --- +import AutoGeneratedSnippet from '/snippets/providers/squadcast-snippet-autogenerated.mdx'; + + ## Inputs @@ -24,14 +27,6 @@ The `notify` function take following parameters as inputs: See [documentation](https://support.squadcast.com/integrations/incident-webhook-incident-webhook-api) for more -## Authentication Parameters -The Squadcast provider requires at least one of the following authentication parameter: - -- `refresh_token` (optional): Your Squadcast refresh_token. -- `webhook_url` (optional): URL of your `incidents_webhook`. - -See [Squadcast Refresh Tokens](https://support.squadcast.com/terraform-and-api-documentation/public-api-refresh-token#from-your-profile-page) for more. - ## Connecting with the Provider 1. 
Go to [Refresh Tokens](https://support.squadcast.com/terraform-and-api-documentation/public-api-refresh-token#from-your-profile-page) to see how to create a `refresh_token`. diff --git a/docs/providers/documentation/ssh-provider.mdx b/docs/providers/documentation/ssh-provider.mdx index 0ec8acd469..e0eea9806e 100644 --- a/docs/providers/documentation/ssh-provider.mdx +++ b/docs/providers/documentation/ssh-provider.mdx @@ -3,25 +3,9 @@ title: "SSH" sidebarTitle: "SSH Provider" description: "The `SSH Provider` is a provider that provides a way to execute SSH commands and get their output." --- +import AutoGeneratedSnippet from '/snippets/providers/ssh-snippet-autogenerated.mdx'; -## Inputs - -- command [**mandatory**]: The command to be executed -- \*\*kwargs [**optional**]: Extra parameters to be formatted in the command (can be other steps output for example) - -## Outputs - -List of lines read from the remote SSH server, both the **stdout** and the **stderr** - -## Authentication Parameters - -This section describes the authentication configuration required for the `SshProvider`. The authentication configuration includes the following fields: - -- `host`: The hostname of the SSH server. -- `user`: The username to use for the SSH connection. -- `port`: The port to use for the SSH connection. Defaults to 22. -- `pkey`: The private key to use for the SSH connection. If provided, the connection will be established using this private key instead of a password. -- `password`: The password to use for the SSH connection. If the private key is not provided, the connection will be established using this password. 
+ ## Connecting with the Provider diff --git a/docs/providers/documentation/statuscake-provider.mdx b/docs/providers/documentation/statuscake-provider.mdx index d4bde36444..f760e36103 100644 --- a/docs/providers/documentation/statuscake-provider.mdx +++ b/docs/providers/documentation/statuscake-provider.mdx @@ -1,14 +1,11 @@ --- title: "StatusCake" sidebarTitle: "StatusCake Provider" -description: "StatusCake allows you to monitor your website and APIs and send alert to keep" +description: "StatusCake allows you to monitor your website and APIs. Keep allows to read alerts and install webhook in StatusCake" --- +import AutoGeneratedSnippet from '/snippets/providers/statuscake-snippet-autogenerated.mdx'; -## Authentication Parameters - -The StatusCake provider requires the following authentication parameters: - -- `Statuscake API Key`: The API key for the StatusCake account. This is required for the StatusCake provider. + ## Connecting with the Provider diff --git a/docs/providers/documentation/sumologic-provider.mdx b/docs/providers/documentation/sumologic-provider.mdx new file mode 100644 index 0000000000..8161b248aa --- /dev/null +++ b/docs/providers/documentation/sumologic-provider.mdx @@ -0,0 +1,28 @@ +--- +title: "SumoLogic Provider" +sidebarTitle: "SumoLogic Provider" +description: "The SumoLogic provider enables webhook installations for receiving alerts in keep" +--- +import AutoGeneratedSnippet from '/snippets/providers/sumologic-snippet-autogenerated.mdx'; + +## Overview + +The SumoLogic provider facilitates receiving alerts from Monitors in SumoLogic using a Webhook Connection. + + + +## Connecting with the Provider + +1. Follow the instructions [here](https://help.sumologic.com/docs/manage/security/access-keys/) to get your Access Key & Access ID +2. Make sure the user has roles with the following capabilities: + - `manageScheduledViews` + - `manageConnections` + - `manageUsersAndRoles` +3. 
Find your `deployment` from [here](https://api.sumologic.com/docs/#section/Getting-Started/API-Endpoints), keep will automatically figure out your endpoint. + +## Useful Links + +- [SumoLogic API Documentation](https://api.sumologic.com/docs/#section/Getting-Started) +- [SumoLogic Access_Keys](https://help.sumologic.com/docs/manage/security/access-keys/) +- [SumoLogic Roles Management](https://help.sumologic.com/docs/manage/users-roles/roles/create-manage-roles/) +- [SumoLogic Deployments](https://api.sumologic.com/docs/#section/Getting-Started/API-Endpoints) diff --git a/docs/providers/documentation/teams-provider.mdx b/docs/providers/documentation/teams-provider.mdx index 2a907da426..5e822b1af8 100644 --- a/docs/providers/documentation/teams-provider.mdx +++ b/docs/providers/documentation/teams-provider.mdx @@ -1,53 +1,237 @@ --- -title: "Teams Provider" -sidebarTitle: "Teams Provider" -description: "Teams Provider is a provider that allows to notify alerts to Microsoft Teams chats." +title: "Microsoft Teams Provider" +sidebarTitle: "Microsoft Teams Provider" +description: "Microsoft Teams Provider is a provider that allows to notify alerts to Microsoft Teams chats." --- +import AutoGeneratedSnippet from '/snippets/providers/teams-snippet-autogenerated.mdx'; -## Inputs + -The `notify` function in the `TeamsProvider` class takes the following parameters: - -```python -kwargs (dict): - message (str): The message to send. *Required* - typeCard (str): The card type. (MessageCard is default) - themeColor (str): Hexadecimal color. - sections (array): Array of custom informations -``` +## Connecting with the Provider -## Outputs + + + 1. In the New Teams client, select Teams and navigate to the channel where + you want to add an Incoming Webhook. 2. Select More options ••• on the right + side of the channel name. 3. 
Select Manage Channel + + + + + For members who aren't admins of the channel, the Manage channel option is + available under the Open channel details option in the upper-right corner + of a channel. + + 4. Select Edit + + + + 5. Search for Incoming Webhook and select Add. + + + + 6. Select Add + + + + 7. Provide a name for the webhook and upload an image if necessary. 8. Select + Create. + + + + 9. Copy and save the unique webhook URL present in the dialog. The URL maps to + the channel and you can use it to send information to Teams. 10. Select Done. + The webhook is now available in the Teams channel. + + + + + + 1. In the Classic Teams client, select Teams and navigate to the channel + where you want to add an Incoming Webhook. 2. Select More options ••• from + the upper-right corner. 3. Select Connectors from the dropdown menu. + + + + 4. Search for Incoming Webhook and select Add. + + + + 5. Select Add. + + + + 6. Provide a name for the webhook and upload an image if necessary. 7. + Select Create. + + + + 8. Copy and save the unique webhook URL present in the dialog. The URL maps + to the channel and you can use it to send information to Teams. 9. Select + Done. + + + + + -_No information yet, feel free to contribute it using the "Edit this page" link the bottom of the page_ +## Notes -## Authentication Parameters +When using Adaptive Cards (`typeCard="message"`): + +- The `sections` parameter should follow the [Adaptive Cards schema](https://adaptivecards.io/explorer/) +- `themeColor` is ignored for Adaptive Cards +- If no sections are provided, the message will be displayed as a simple text block +- Both `sections` and `attachments` can be provided as JSON strings or arrays +- You can mention users in your Adaptive Cards using the `mentions` parameter + +### Workflow Example + +You can also find this example in our [examples](https://github.com/keephq/keep/tree/main/examples/workflows/keep-teams-adaptive-cards.yaml) folder in the Keep GitHub repository. 
+ +```yaml +id: 6bc7c72e-ab3d-4913-84dd-08b9323195ae +description: Teams Adaptive Cards Example +disabled: false +triggers: + - type: manual + - filters: + - key: source + value: r".*" + type: alert +consts: {} +name: Keep Teams Adaptive Cards +owners: [] +services: [] +steps: [] +actions: + - name: teams-action + provider: + config: "{{ providers.teams }}" + type: teams + with: + message: "" + sections: '[{"type": "TextBlock", "text": "{{alert.name}}"}, {"type": "TextBlock", "text": "Tal from Keep"}]' + typeCard: message + # Optional: Add mentions to notify specific users + # mentions: '[{"id": "user@example.com", "name": "User Name"}]' +``` -The TeamsProviderAuthConfig class takes the following parameters: +You can also find an example with user mentions in our [examples](https://github.com/keephq/keep/tree/main/examples/workflows/keep-teams-adaptive-cards-with-mentions.yaml) folder. -- `webhook_url` (str): associated with the channel requires to trigger the message to the respective channel. _Required_ + + The sections parameter is a JSON string that follows the Adaptive Cards schema, but can also be an object. + If it's a string, it will be parsed as a JSON string. + -## Connecting with the Provider +### Using Sections -1. Open the Microsoft Teams application or website and select the team or channel where you want to add the webhook. +```python +provider.notify( + message="Fallback text", + typeCard="message", + sections=[ + { + "type": "TextBlock", + "text": "Hello from Adaptive Card!" + }, + { + "type": "Image", + "url": "https://example.com/image.jpg" + } + ] +) +``` -2. Click on the three-dot icon next to the team or channel name and select "Connectors" from the dropdown menu. +### Using Custom Attachments -3. Search for "Incoming Webhook" and click on the "Add" button. 
+```python +provider.notify( + typeCard="message", + attachments=[{ + "contentType": "application/vnd.microsoft.card.adaptive", + "content": { + "type": "AdaptiveCard", + "version": "1.2", + "body": [ + { + "type": "TextBlock", + "text": "Custom Attachment Example" + } + ] + } + }] +) +``` -4. Give your webhook a name and an optional icon, then click on the "Create" button. +### Using User Mentions in Adaptive Cards -5. Copy the webhook URL that is generated and save it for later use. +You can mention users in your Adaptive Cards using the `mentions` parameter. The text in your card should include the mention in the format `User Name`, and you need to provide the user's ID and name in the `mentions` parameter. -6. Select the options that you want to configure for your webhook, such as the default name and avatar that will be used when posting messages. +Teams supports three types of user IDs for mentions: +- Teams User ID (format: `29:1234...`) +- Microsoft Entra Object ID (format: `49c4641c-ab91-4248-aebb-6a7de286397b`) +- User Principal Name (UPN) (format: `user@example.com`) -7. Click on the "Save" button to save your webhook settings. +```python +provider.notify( + typeCard="message", + sections=[ + { + "type": "TextBlock", + "text": "Hello John Doe, please review this alert!" + } + ], + mentions=[ + { + "id": "john.doe@example.com", # Can be UPN, Microsoft Entra Object ID, or Teams User ID + "name": "John Doe" + } + ] +) +``` -You can now use the webhook URL to send messages to the selected channel or team in Microsoft Teams. +You can also mention multiple users in a single card: -## Notes +```python +provider.notify( + typeCard="message", + sections=[ + { + "type": "TextBlock", + "text": "Hello John Doe and Jane Smith, please review this alert!" 
+ } + ], + mentions=[ + { + "id": "john.doe@example.com", + "name": "John Doe" + }, + { + "id": "49c4641c-ab91-4248-aebb-6a7de286397b", # Microsoft Entra Object ID + "name": "Jane Smith" + } + ] +) +``` -_No information yet, feel free to contribute it using the "Edit this page" link the bottom of the page_ +In YAML workflows, you can provide the mentions as a JSON string: + +```yaml +actions: + - name: teams-action + provider: + config: "{{ providers.teams }}" + type: teams + with: + typeCard: message + sections: '[{"type": "TextBlock", "text": "Hello John Doe, please review this alert!"}]' + mentions: '[{"id": "john.doe@example.com", "name": "John Doe"}]' +``` ## Useful Links - https://learn.microsoft.com/pt-br/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook +- https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using +- https://adaptivecards.io/explorer/ +- https://adaptivecards.io/schemas/adaptive-card.json diff --git a/docs/providers/documentation/telegram-provider.mdx b/docs/providers/documentation/telegram-provider.mdx index 89ff0b4382..5513dac526 100644 --- a/docs/providers/documentation/telegram-provider.mdx +++ b/docs/providers/documentation/telegram-provider.mdx @@ -2,16 +2,11 @@ title: "Telegram Provider" description: "Telegram Provider is a provider that allows to notify alerts to telegram chats." --- +import AutoGeneratedSnippet from '/snippets/providers/telegram-snippet-autogenerated.mdx'; -## Inputs + -The `notify` function in the `TelegramProvider` class takes the following parameters: - -```python -kwargs (dict): - message (str): The message to send. *Required* - chat_id (str): The chat_id of which to send the message to. *Required* (How to get chat id - https://stackoverflow.com/questions/32423837/telegram-bot-how-to-get-a-group-chat-id) -``` +Telegram only supports limited formatting options. 
Refer to the [Telegram Bot API documentation](https://core.telegram.org/bots/api#formatting-options) for more information. ## Authentication Parameters diff --git a/docs/providers/documentation/template.mdx b/docs/providers/documentation/template.mdx index 5eff334d85..0700a73a83 100644 --- a/docs/providers/documentation/template.mdx +++ b/docs/providers/documentation/template.mdx @@ -2,6 +2,9 @@ title: "Template" description: "Template Provider is a template for newly added provider's documentation" --- +{/* import AutoGeneratedSnippet from '/snippets/providers/template-snippet-autogenerated.mdx'; */} + +{/* */} ## Inputs diff --git a/docs/providers/documentation/thousandeyes-provider.mdx b/docs/providers/documentation/thousandeyes-provider.mdx new file mode 100644 index 0000000000..03e32828f8 --- /dev/null +++ b/docs/providers/documentation/thousandeyes-provider.mdx @@ -0,0 +1,89 @@ +--- +title: 'ThousandEyes' +sidebarTitle: 'ThousandEyes Provider' +description: 'ThousandEyes allows you to receive alerts from ThousandEyes using API endpoints as well as webhooks' +--- + +import AutoGeneratedSnippet from '/snippets/providers/thousandeyes-snippet-autogenerated.mdx'; + + + +## Connecting ThousandEyes to Keep + +1. Go to [ThousandEyes Dashboard](https://app.thousandeyes.com/dashboard) + +2. Click on `Manage` in the left sidebar and select `Account Settings`. + + + + + +3. Select `Users and Roles` in the Account Settings + + + + + +4. Under `User API Tokens`, you can create OAuth Bearer Token + + + + + +5. Copy the generated token. This will be used as the `OAuth2 Bearer Token` in the provider settings. + +## Webhooks Integration + +1. Open [ThousandEyes Dashboard](https://app.thousandeyes.com/dashboard) and click on `Network & App Synthetics` in the left sidebar and select `Agent Settings`. + + + + + +2. Go to `Notifications` under `Enterprise Agents` and click on `Notifications`. + + + + + +3. Go to `Notifications` and create new webhook notification. + + + + + +4. 
Give it a name and set the url as [https://api.keephq.dev/alerts/event/thousandeyes?api_key=your-api-key](https://api.keephq.dev/alerts/event/thousandeyes?api_key=your-api-key) + +5. Select `Auth Type` as None and `Add New Webhook`. + + + + + +6. Go to Keep dashboard and click on the profile icon in the bottom left corner and click `Settings`. + + + + + +7. Select `Users and Access` tab and then select `API Keys` tab and create a new API key. + + + + + +8. Give a name and select the role as `webhook` and click on `Create API Key`. + + + + + +9. Copy the API key and paste it in the webhook URL. + + + + + +## Useful Links + +- [ThousandEyes](https://www.thousandeyes.com/) diff --git a/docs/providers/documentation/trello-provider.mdx b/docs/providers/documentation/trello-provider.mdx index 8c38148c2f..aaa6da8b92 100644 --- a/docs/providers/documentation/trello-provider.mdx +++ b/docs/providers/documentation/trello-provider.mdx @@ -3,19 +3,9 @@ title: "Trello" sidebarTitle: "Trello Provider" description: "Trello provider is a provider used to query data from Trello" --- +import AutoGeneratedSnippet from '/snippets/providers/trello-snippet-autogenerated.mdx'; -## Inputs - -The `query` function take following parameters as inputs: - -- `board_id`: Required. Trello board id -- `filter`: Optional. Comma seperated list of trello events that want to query, default value is 'createCard' - -## Outputs - -## Authentication Parameters - -The `query` function requires an `api_key` and `api_token` from Trello, which can obtained by making custom power-up in Trello admin.
+ ## Connecting with the Provider diff --git a/docs/providers/documentation/twilio-provider.mdx b/docs/providers/documentation/twilio-provider.mdx index b0f3d959a1..67992eb6d3 100644 --- a/docs/providers/documentation/twilio-provider.mdx +++ b/docs/providers/documentation/twilio-provider.mdx @@ -2,24 +2,9 @@ title: "Twilio Provider" description: "Twilio Provider is a provider that allows to notify alerts via SMS using Twilio." --- +import AutoGeneratedSnippet from '/snippets/providers/twilio-snippet-autogenerated.mdx'; -## Inputs - -The `notify` function in the `TwilioProvider` class takes the following parameters: - -```python -kwargs (dict): - message_body (str): The message to send. *Required* - to_phone_number (str): The phone number to which you want to send SMS. *Required* -``` - -## Authentication Parameters - -The TwilioProviderAuthConfig class takes the following parameters: - -- account_sid (str): Twilio account SID. \*Required\*\* -- api_token (str): Twilio API token. \*Required\*\* -- from_phone_number (str): Twilio phone number from which SMS alert will be sent. 
\*Required\*\* + ## Connecting with the Provider @@ -29,4 +14,4 @@ How to create Twilio API token - https://support.twilio.com/hc/en-us/articles/22 ## Useful Links - Twilio API token - https://support.twilio.com/hc/en-us/articles/223136027-Auth-Tokens-and-How-to-Change-Them -- Twilio phone number - https://www.twilio.com/en-us/guidelines/regulatory +- Twilio phone number - https://www.twilio.com/en-us/guidelines/regulatory \ No newline at end of file diff --git a/docs/providers/documentation/uptimekuma-provider.mdx b/docs/providers/documentation/uptimekuma-provider.mdx index b52f02769d..9aaa3fd5f7 100644 --- a/docs/providers/documentation/uptimekuma-provider.mdx +++ b/docs/providers/documentation/uptimekuma-provider.mdx @@ -3,14 +3,9 @@ title: "UptimeKuma" sidebarTitle: "UptimeKuma Provider" description: "UptimeKuma allows you to monitor your website and APIs and send alert to keep" --- +import AutoGeneratedSnippet from '/snippets/providers/uptimekuma-snippet-autogenerated.mdx'; -## Authentication Parameters - -The UptimeKuma provider requires the following authentication parameters: - -- `UptimeKuma Host URL`: The URL of the UptimeKuma instance. This is required for the UptimeKuma provider. -- `UptimeKuma Username`: The username for the UptimeKuma account. This is required for the UptimeKuma provider. -- `UptimeKuma Password`: The password for the UptimeKuma account. This is required for the UptimeKuma provider. + ## Connecting with the Provider diff --git a/docs/providers/documentation/victorialogs-provider.mdx b/docs/providers/documentation/victorialogs-provider.mdx new file mode 100644 index 0000000000..8469d6bc13 --- /dev/null +++ b/docs/providers/documentation/victorialogs-provider.mdx @@ -0,0 +1,64 @@ +--- +title: 'VictoriaLogs' +sidebarTitle: 'VictoriaLogs Provider' +description: 'VictoriaLogs provider allows you to query logs from VictoriaLogs.' 
+--- +import AutoGeneratedSnippet from '/snippets/providers/victorialogs-snippet-autogenerated.mdx'; + +## Overview + +VictoriaLogs is an open-source, user-friendly database for logs from VictoriaMetrics. It is optimized for high performance and low memory usage. It can handle high cardinality and high volume of logs. + +Note: To add authentication, VMAuth should be configured. For more information, refer to the [VMauth documentation](https://docs.victoriametrics.com/vmauth/). + + + + +### NoAuth +- No additional parameters are required, only the `VictoriaLogs Host URL` is required. + +### HTTP basic authentication +- `HTTP basic authentication - Username`: The username to use for HTTP basic authentication. +- `HTTP basic authentication - Password`: The password to use for HTTP basic authentication. + +### Bearer +- `Bearer Token` : The bearer token to use for authentication. +- `X-Scope-OrgID Header`: The organization ID to use for VictoriaLogs Multi-tenancy support. (Optional) + +## Querying VictoriaLogs + +The VictoriaLogs provider allows you to query logs from VictoriaLogs through the `query`, `hits`, `stats_query` and `stats_query_range` types. The following are the parameters available for querying: + +1. `query` type: + + - `query`: This is the query to perform. + - `limit`: The max number of matching entries to return. + - `timeout`: The query timeout in seconds. + - `AccountID`: The account ID to use for VictoriaLogs. + - `ProjectID`: The project ID to use for VictoriaLogs. + +2. `hits` type: + + - `query`: This is the query to perform. + - `start`: The start time for the query. + - `end`: The end time for the query. + - `step`: The step for the query. + - `AccountID`: The account ID to use for VictoriaLogs. + - `ProjectID`: The project ID to use for VictoriaLogs. + +3. `stats_query` type: + + - `query`: This is the query to perform. + - `time`: The evaluation time for the query. + +4. `stats_query_range` type: + + - `query`: This is the query to perform. 
+ - `start`: The start time for the query. + - `end`: The end time for the query. + - `step`: The step for the query. + +## Useful Links + +- [VictoriaLogs](https://docs.victoriametrics.com/victorialogs/) +- [VMauth documentation](https://docs.victoriametrics.com/vmauth/) \ No newline at end of file diff --git a/docs/providers/documentation/victoriametrics-provider.mdx b/docs/providers/documentation/victoriametrics-provider.mdx index c404bda4d4..005ed33120 100644 --- a/docs/providers/documentation/victoriametrics-provider.mdx +++ b/docs/providers/documentation/victoriametrics-provider.mdx @@ -3,44 +3,33 @@ title: "Victoriametrics Provider" sidebarTitle: "Victoriametrics Provider" description: "The VictoriametricsProvider allows you to fetch alerts in Victoriametrics." --- +import AutoGeneratedSnippet from '/snippets/providers/victoriametrics-snippet-autogenerated.mdx'; -## Authentication Parameters -The Victoriametrics provider requires the following authentication parameters: - -- `VMAlertHost`: The hostname or IP address where VMAlert is running. Example: `localhost`, `192.168.1.100`, or `vmalert.mydomain.com`. -- `VMAlertPort`: The port number on which VMAlert is listening. Example: 8880 (if VMAlert is set to listen on port 8880). + ## Connecting with the Provider 1. Ensure you have a running instance of VMAlert accessible by the host and port specified. 2. Include the host and port information in your Victoriametrics provider configuration when initializing the provider. +## Querying Victoriametrics + +The Victoriametrics provider allows you to query from Victoriametrics through `query` and `query_range` types. The following are the parameters available for querying: + +1. `query` type: -## Push alerts to keep using webhooks - -You can push alerts to keep without connecting to Victoriametrics This provider takes advantage of configurable webhooks available with Prometheus Alertmanager. 
Use the following template to configure AlertManager: -```yml -route: - receiver: "keep" - group_by: ['alertname'] - group_wait: 15s - group_interval: 15s - repeat_interval: 1m - continue: true - -receivers: -- name: "keep" - webhook_configs: - - url: '{keep_webhook_api_url}' - send_resolved: true - http_config: - basic_auth: - username: api_key - password: {api_key} -``` + - `query`: The query to execute on Victoriametrics. Example: `sum(rate(http_requests_total{job="api-server"}[5m]))`. + - `start`: The time to query the data for. Example: `2024-01-01T00:00:00Z` + +2. `query_range` type: + - `query`: The query to execute on Victoriametrics. Example: `sum(rate(http_requests_total{job="api-server"}[5m]))`. + - `start`: The start time to query the data for. Example: `2024-01-01T00:00:00Z` + - `end`: The end time to query the data for. Example: `2024-01-01T00:00:00Z` + - `step`: The step size to use for the query. Example: `15s` ## Useful Links - [Victoriametrics](https://victoriametrics.com/docs/) - [VMAlert](https://victoriametrics.github.io/vmalert.html) + diff --git a/docs/providers/documentation/vllm-provider.mdx b/docs/providers/documentation/vllm-provider.mdx new file mode 100644 index 0000000000..a6511215e3 --- /dev/null +++ b/docs/providers/documentation/vllm-provider.mdx @@ -0,0 +1,19 @@ +--- +title: "vLLM Provider" +description: "The vLLM Provider enables integration with vLLM-deployed language models into Keep." +--- +import AutoGeneratedSnippet from '/snippets/providers/vllm-snippet-autogenerated.mdx'; + + + The vLLM Provider supports querying language models deployed with vLLM for prompt-based interactions. + + + + +## Connecting with the Provider + +To connect to a vLLM deployment: + +1. Deploy your vLLM instance or obtain the API endpoint of an existing deployment +2. Configure the API URL in your provider configuration +3. 
If your deployment requires authentication, configure the API key diff --git a/docs/providers/documentation/wazuh-provider.mdx b/docs/providers/documentation/wazuh-provider.mdx new file mode 100644 index 0000000000..49cafa8e98 --- /dev/null +++ b/docs/providers/documentation/wazuh-provider.mdx @@ -0,0 +1,63 @@ +--- +title: 'Wazuh' +sidebarTitle: 'Wazuh Provider' +description: 'Wazuh provider allows you to get alerts from Wazuh via custom integration.' +--- +import AutoGeneratedSnippet from '/snippets/providers/wazuh-snippet-autogenerated.mdx'; + +## Overview + +The Wazuh provider enables seamless integration between Keep and Wazuh. +It allows you to get alerts from Wazuh to Keep via custom integration making it easier to +track security-related activities in one place. + +Please refer to the [Wazuh Docs](https://documentation.wazuh.com/current/user-manual/manager/integration-with-external-apis.html#custom-integration) if you want to learn more about Wazuh Custom Integrations. + + + + + +## Connecting Wazuh to Keep + +To connect Wazuh to Keep, you need to configure it as a custom integration in Wazuh. Follow the steps below to set up the integration: + +1. Keep webhook scripts need to be installed on the Wazuh server. + +2. You can download the Keep webhook scripts using the following command: + +```bash +wget -O custom-keep.py https://github.com/keephq/keep/blob/main/keep/providers/wazuh_provider/custom-keep.py?raw=true +wget -O custom-keep https://github.com/keephq/keep/blob/main/keep/providers/wazuh_provider/custom-keep?raw=true +``` + +3. Copy the downloaded scripts to the following path on the Wazuh server: `/var/ossec/integrations/` and set the correct permissions +```bash +cp custom-keep.py /var/ossec/integrations/custom-keep.py +cp custom-keep /var/ossec/integrations/custom-keep +chown root:wazuh custom-keep* +chmod 750 /var/ossec/integrations/custom-keep* +``` + +4. Get the Webhook URL of Keep which is `https://api.keephq.dev/alerts/event/wazuh`. + +5. 
Get the API Key of Keep which you can generate in the [Keep settings](https://platform.keephq.dev/settings?selectedTab=users&userSubTab=api-keys). + +6. In the config `/var/ossec/etc/ossec.conf` set new integration block +```xml + + custom-keep + 10 + PLACE_YOUR_KEEP_WEBHOOK_URL_HERE + PLACE_HERE_YOUR_API_KEY + json + +``` +Please refer to the [Wazuh Documentation](https://documentation.wazuh.com/current/user-manual/manager/integration-with-external-apis.html#custom-integration) for more information +and set the `level` you are interested in. +7. Restart the `wazuh-manager` +```bash +$ systemctl restart wazuh-manager +``` +## Useful Links + +- [Wazuh](https://documentation.wazuh.com/) \ No newline at end of file diff --git a/docs/providers/documentation/webhook-provider.mdx b/docs/providers/documentation/webhook-provider.mdx new file mode 100644 index 0000000000..e358a84c63 --- /dev/null +++ b/docs/providers/documentation/webhook-provider.mdx @@ -0,0 +1,8 @@ +--- +title: 'Webhook' +sidebarTitle: 'Webhook Provider' +description: 'A webhook is a method used to send real-time data from one application to another whenever a specific event occurs' +--- +import AutoGeneratedSnippet from '/snippets/providers/webhook-snippet-autogenerated.mdx'; + + diff --git a/docs/providers/documentation/websocket-provider.mdx b/docs/providers/documentation/websocket-provider.mdx index 76299436bd..b9cb4ed78a 100644 --- a/docs/providers/documentation/websocket-provider.mdx +++ b/docs/providers/documentation/websocket-provider.mdx @@ -1,20 +1,9 @@ --- title: "Websocket" --- +import AutoGeneratedSnippet from '/snippets/providers/websocket-snippet-autogenerated.mdx'; -# Websocket Provider - -WebsocketProvider is a class that implements a simple websocket provider. - -## Inputs -The `query` function of `WebsocketProvider` takes the following arguments: - -- `socket_url` (str): The websocket URL to query. -- `timeout` (int | None, optional): Connection Timeout. Defaults to None. 
-- `data` (str | None, optional): Data to send through the websocket. Defaults to None. -- `**kwargs` (optional): Additional optional parameters can be provided as key-value pairs. - -See [documentation](https://websocket-client.readthedocs.io/en/latest/api.html#websocket.WebSocket.send) for more information. + ## Outputs The `query` function of `WebsocketProvider` outputs the following format: @@ -39,30 +28,4 @@ To connect with the Websocket provider and perform queries, follow these steps: Initialize the provider and provider configuration in your system. Use the query function of the WebsocketProvider to interact with the websocket. -Example usage: -```yaml -alert: - id: check-websocket-is-up - description: Monitor that this HTTP endpoint is up and running - steps: - - name: websocket-test - provider: - type: websocket - with: - socket_url: "ws://echo.websocket.events" - actions: - - name: trigger-slack-websocket - condition: - - name: assert-condition - type: assert - assert: "{{ steps.websocket-test.results.connection }} == true" - provider: - type: slack - config: "{{ providers.slack-demo }}" - with: - message: "Could not connect to ws://echo.websocket.events using websocket" - on-failure: - provider: - type: slack - config: "{{ providers.slack-demo }}" -``` +See [documentation](https://websocket-client.readthedocs.io/en/latest/api.html#websocket.WebSocket.send) for more information. \ No newline at end of file diff --git a/docs/providers/documentation/youtrack-provider.mdx b/docs/providers/documentation/youtrack-provider.mdx new file mode 100644 index 0000000000..6d5e014030 --- /dev/null +++ b/docs/providers/documentation/youtrack-provider.mdx @@ -0,0 +1,24 @@ +--- +title: 'YouTrack' +sidebarTitle: 'YouTrack Provider' +description: 'YouTrack provider allows you to create new issues in YouTrack.' 
+--- +import AutoGeneratedSnippet from '/snippets/providers/youtrack-snippet-autogenerated.mdx'; + +## Overview + +YouTrack is a project management tool packed with features that streamline your work and increase productivity on any team project. From software development and DevOps to HR and marketing, all kinds of teams can use YouTrack's functionality to easily track and collaborate on projects of any size. + + + + +### How to get Project ID and Permanent Token? + +1. **Project ID**: The project ID can be found in the URL of the project. For example, in the URL `https:///projects/`, the project ID is ``. + +2. **Permanent Token**: Checkout the [YouTrack - Generate Permanent Token](https://www.jetbrains.com/help/youtrack/server/manage-permanent-token.html) documentation to generate a permanent token. + +## Useful Links + +- [YouTrack](https://www.jetbrains.com/youtrack/) +- [YouTrack - Generate Permanent Token](https://www.jetbrains.com/help/youtrack/server/manage-permanent-token.html) \ No newline at end of file diff --git a/docs/providers/documentation/zabbix-provider.mdx b/docs/providers/documentation/zabbix-provider.mdx index 4bebd743c6..214a1d7ce4 100644 --- a/docs/providers/documentation/zabbix-provider.mdx +++ b/docs/providers/documentation/zabbix-provider.mdx @@ -3,15 +3,14 @@ title: "Zabbix" sidebarTitle: "Zabbix Provider" description: "Zabbix provider allows you to pull/push alerts from Zabbix" --- +import AutoGeneratedSnippet from '/snippets/providers/zabbix-snippet-autogenerated.mdx'; Please note that we currently only support Zabbix of version 6 and above (6.0^) -## Authentication Parameters - -The `zabbix_frontend_url` and `auth_token` are required for connecting to the Zabbix provider. You can obtain them as described in the ["Connecting with the Provider"](./zabbix-provider#connecting-with-the-provider) section. 
+ ## Connecting with the Provider @@ -30,8 +29,20 @@ First, login in to your Zabbix account (the provided `zabbix_frontend_url`) with - This is because some of the scopes we need are available to `Super Admin` user type only. [See here](https://www.zabbix.com/documentation/current/en/manual/api/reference/mediatype/create) -5. Remove all the checkboxes from everything, except 1 random `Access to UI elemets` which is required for any role. -6. In the `API methods` section, select `Allow list` and fill in the scopes as [mentioned below](./zabbix-provider#scopes), in the Scopes section. +5. Remove all the checkboxes from everything, except 1 random `Access to UI elements` which is required for any role. +6. In the `API methods` section, select `Allow list` and fill with these scopes: +- `action.create` +- `action.get` +- `event.acknowledge` +- `mediatype.create` +- `mediatype.get` +- `mediatype.update` +- `problem.get` +- `script.create` +- `script.get` +- `script.update` +- `user.get` +- `user.update` @@ -52,34 +63,6 @@ First, login in to your Zabbix account (the provided `zabbix_frontend_url`) with 5. Unselect the `Set expiration date and time` checkbox and click `Add` 6. Copy the generated API token and keep it for further use in Keep. -## Scopes - -Certain scopes may be required to perform specific actions or queries via Zabbix Provider. 
Below is a summary of relevant scopes and their use cases: - -- `problem.get` - | Required: `True` - | Description: `The method allows to retrieve problems.` -- `mediatype.get` - | Required: `False` - | Required for Webhook: `True` - | Description: `The method allows to retrieve media types.` -- `mediatype.update` - | Required: `False` - | Required for Webhook: `True` - | Description: `This method allows to update existing media types.` -- `mediatype.create` - | Required: `False` - | Required for Webhook: `True` - | Description: `This method allows to create new media types.` -- `user.get` - | Required: `False` - | Required for Webhook: `True` - | Description: `The method allows to retrieve users.` -- `user.update` - | Required: `False` - | Required for Webhook: `True` - | Description: `This method allows to update existing users.` - ## Notes diff --git a/docs/providers/documentation/zenduty-provider.mdx b/docs/providers/documentation/zenduty-provider.mdx index 6d5c972f76..0d06a385c5 100644 --- a/docs/providers/documentation/zenduty-provider.mdx +++ b/docs/providers/documentation/zenduty-provider.mdx @@ -3,28 +3,13 @@ title: "Zenduty" sidebarTitle: "Zenduty Provider" description: "Zenduty docs" --- +import AutoGeneratedSnippet from '/snippets/providers/zenduty-snippet-autogenerated.mdx'; ![User key](/images/zenduty.jpeg) -## Inputs - -The Zenduty provider gets "title", "summary" and "service" as an input which will be used for the incident. -The `query` method of the ZendutyProvider` class takes the following inputs: - -- `title`: The title of Zenduty incident. -- `summary`: The summary of Zenduty incident. -- `service`: The service of Zenduty incident. - -## Outputs - -None. - -## Authentication Parameters - -The Zenduty gets api key as an authentication method. 
- -- `api_key` - Zenduty Api Key - Authentication configuration example: + + +## Authentication configuration example: ``` zenduty: diff --git a/docs/providers/documentation/zoom-provider.mdx b/docs/providers/documentation/zoom-provider.mdx new file mode 100644 index 0000000000..73eec9669b --- /dev/null +++ b/docs/providers/documentation/zoom-provider.mdx @@ -0,0 +1,58 @@ +--- +title: "Zoom" +sidebarTitle: "Zoom Provider" +description: "Zoom provider allows you to create meetings with Zoom." +--- +import AutoGeneratedSnippet from '/snippets/providers/zoom-snippet-autogenerated.mdx'; + + +For this integration, you'll need to create a Zoom Application - for more details read https://developers.zoom.us/docs/internal-apps + + + +The `record_meeting` parameter won't work with Zoom's basic plan. With basic plan, you'll be able to connect to the meeting and enable the "recording" manually. + + + + +## Connecting with the Provider + +### Create an Application + + + + + + +Keep the credentials: + + + + + + +### Grant Scopes + + + + + + +### Activate the app + + + + + + +### (Optional) Make sure cloud recording is set on your account + + + + + + + + + + diff --git a/docs/providers/documentation/zoom_chat-provider.mdx b/docs/providers/documentation/zoom_chat-provider.mdx new file mode 100644 index 0000000000..6240726beb --- /dev/null +++ b/docs/providers/documentation/zoom_chat-provider.mdx @@ -0,0 +1,82 @@ +--- +title: "Zoom Chat" +sidebarTitle: "Zoom Chat Provider" +description: "Zoom Chat provider allows you to send Zoom Chats using the Incoming Webhook Zoom application." 
+--- +import AutoGeneratedSnippet from '/snippets/providers/zoom_chat-snippet-autogenerated.mdx'; + + +For this integration, you will need to add and configure the Incoming Webhook application from the Zoom App Marketplace: https://marketplace.zoom.us/apps/eH_dLuquRd-VYcOsNGy-hQ + + + + +## Connecting with the Provider + +### Enable the Incoming Webhook Application + +The Incoming Webhook application is available in the Zoom App Marketplace. + + + + + + + + + +### Create Team Chat Channel: + +This channel will be the recipient of the Keep notifications. + + + + + + + + + +### Enable the Incoming Webhook Application + +Send `/inc connect ` to the channel to enable a webhook with authorization code. The app will respond with the webhook url and authorization code. + + +You should use the "Full Format" Incoming Webhook Url, which ends in `?format=full`. + + + + + + + + + + +## (Optional) Enabling User JID Lookup + +Messages can optionally include Zoom user JIDs, which are used to tag a particular Zoom user in a message. +This is useful, for example, if a team subscribes to a chat channel but members only wish to be notified when they are explicitly tagged. + +### Create a Zoom Application + +User lookup requires authorization. Create an internal only, Zoom Server to Server OAuth application. + + + + + + + + + +### Assign Required Scopes + + + + + + + + + diff --git a/docs/providers/getting-started.mdx b/docs/providers/getting-started.mdx deleted file mode 100644 index fb10c800d9..0000000000 --- a/docs/providers/getting-started.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -Title: "Providers" -sidebarTitle: "Getting Started" -description: "We tried our best to cover all common providers." ---- - -Click [here](https://github.com/keephq/keep/issues/new?assignees=&labels=feature,provider&template=feature_request.md&title=Missing%20PROVIDER_NAME) if you feel like we're missing some and we'll do our best to add them ASAP. - -Common providers include: - - - AWS, GCP, Azure, etc. 
- - - Sentry, New Relic, Datadog, etc. - - - PagerDuty, OpsGenie, etc. - - - Email, Slack, Discord, Microsoft Teams, etc. - - - MySQL, Postgresql etc - - - diff --git a/docs/providers/linked-providers.mdx b/docs/providers/linked-providers.mdx new file mode 100644 index 0000000000..59790a3463 --- /dev/null +++ b/docs/providers/linked-providers.mdx @@ -0,0 +1,83 @@ +--- +title: "Linked providers" +description: "Understanding linked vs connected providers in Keep" +--- + +# Linked providers + +In Keep, providers can be either "connected" or "linked." Understanding the difference is important for proper alert routing and management. + + + + + +## Connected vs linked providers + +- **Connected Providers**: These are providers that have been explicitly configured in Keep through the UI or API. They have full provider configuration and authentication details. + +- **Linked Providers**: These are providers that send alerts to Keep without being explicitly connected. They appear automatically when Keep receives alerts from them through webhooks or push mechanisms. + +## How linking works + +When Keep receives alerts from an unconnected provider (like Prometheus pushing alerts), it automatically creates a "linked" provider entry. This allows you to: + +- Track which systems are sending alerts +- See when Keep last received an alert +- Apply deduplication rules specific to that provider + +## Attaching alerts to connected providers + +If you have a connected provider and want to associate incoming alerts with it instead of creating a linked provider, add the `provider_id` query parameter to the webhook URL. 
+ +For example, with Prometheus AlertManager: + +```yaml +alertmanager: + config: + receivers: + - name: "keep" + webhook_configs: + - url: "https://api.keephq.dev/alerts/event/prometheus?provider_id=your_provider_id" +``` + +Or with other webhook-based integrations: + +```bash +# Grafana webhook +https://api.keephq.dev/alerts/event/grafana?provider_id=grafana-prod + +# Datadog webhook +https://api.keephq.dev/alerts/event/datadog?provider_id=datadog-main + +# Generic webhook +https://api.keephq.dev/alerts/event/webhook?provider_id=custom-webhook +``` + +## Best practices + +1. **For Production Systems**: It's recommended to use connected providers when possible, as they provide: + + - Better authentication and security + - Access to provider-specific features + - Clearer audit trail + +2. **For Testing/Development**: Linked providers can be useful for: + + - Quick prototyping + - Testing alert flows + - Temporary integrations + +3. **Converting Linked to Connected**: If you regularly receive alerts from a linked provider, consider: + - Setting up a proper provider connection + - Using the `provider_id` parameter to attach alerts to the connected provider + +## Limitations + +Linked providers: + +- Can't be used to pull alerts or data +- Don't have authentication details +- Can't be used for provider-specific actions +- May have limited deduplication capabilities + +For full capabilities, consider converting linked providers to connected providers when they become part of your permanent alerting infrastructure. diff --git a/docs/providers/overview.md b/docs/providers/overview.md new file mode 100644 index 0000000000..3beb65763a --- /dev/null +++ b/docs/providers/overview.md @@ -0,0 +1,132 @@ +# Providers Overview + +Providers are core components of Keep that allows Keep to either query data, send notifications, get alerts from or manage third-party tools. 
+ +These third-party tools include, among others, Datadog, Cloudwatch, and Sentry for data querying and/or alert management, and Slack, Resend, Twilio, and PagerDuty for notifications/incidents. + +By leveraging Keep Providers, users are able to deeply integrate Keep with the tools they use and trust, providing them with a flexible and powerful way to manage these tools with ease and from a single pane. + +## Available Providers + +- [Airflow](/providers/documentation/airflow-provider) +- [Azure AKS](/providers/documentation/aks-provider) +- [AmazonSQS](/providers/documentation/amazonsqs-provider) +- [Anthropic](/providers/documentation/anthropic-provider) +- [AppDynamics](/providers/documentation/appdynamics-provider) +- [ArgoCD](/providers/documentation/argocd-provider) +- [Flux CD](/providers/documentation/fluxcd-provider) +- [Asana](/providers/documentation/asana-provider) +- [Auth0](/providers/documentation/auth0-provider) +- [Axiom](/providers/documentation/axiom-provider) +- [Azure Monitor](/providers/documentation/azuremonitoring-provider) +- [Bash](/providers/documentation/bash-provider) +- [BigQuery](/providers/documentation/bigquery-provider) +- [Centreon](/providers/documentation/centreon-provider) +- [Checkmk](/providers/documentation/checkmk-provider) +- [Checkly](/providers/documentation/checkly-provider) +- [Cilium](/providers/documentation/cilium-provider) +- [ClickHouse](/providers/documentation/clickhouse-provider) +- [CloudWatch](/providers/documentation/cloudwatch-provider) +- [Console](/providers/documentation/console-provider) +- [Coralogix](/providers/documentation/coralogix-provider) +- [Dash0](/providers/documentation/dash0-provider) +- [Datadog](/providers/documentation/datadog-provider) +- [Databend](/providers/documentation/databend-provider) +- [DeepSeek](/providers/documentation/deepseek-provider) +- [Discord](/providers/documentation/discord-provider) +- [Dynatrace](/providers/documentation/dynatrace-provider) +- 
[EKS](/providers/documentation/eks-provider) +- [Elastic](/providers/documentation/elastic-provider) +- [Flashduty](/providers/documentation/flashduty-provider) +- [GCP Monitoring](/providers/documentation/gcpmonitoring-provider) +- [Gemini](/providers/documentation/gemini-provider) +- [GitHub](/providers/documentation/github-provider) +- [Github Workflows](/providers/documentation/github_workflows_provider) +- [GitLab](/providers/documentation/gitlab-provider) +- [GitLab Pipelines](/providers/documentation/gitlabpipelines-provider) +- [Google Kubernetes Engine](/providers/documentation/gke-provider) +- [Google Chat](/providers/documentation/google_chat-provider) +- [Grafana](/providers/documentation/grafana-provider) +- [Grafana Incident](/providers/documentation/grafana_incident-provider) +- [Grafana Loki](/providers/documentation/grafana_loki-provider) +- [Grafana OnCall](/providers/documentation/grafana_oncall-provider) +- [Graylog](/providers/documentation/graylog-provider) +- [Grok](/providers/documentation/grok-provider) +- [HTTP](/providers/documentation/http-provider) +- [Icinga2](/providers/documentation/icinga2-provider) +- [ilert](/providers/documentation/ilert-provider) +- [Incident.io](/providers/documentation/incidentio-provider) +- [Incident Manager](/providers/documentation/incidentmanager-provider) +- [Jira On-Prem](/providers/documentation/jira-on-prem-provider) +- [Jira Cloud](/providers/documentation/jira-provider) +- [Kafka](/providers/documentation/kafka-provider) +- [Keep](/providers/documentation/keep-provider) +- [Kibana](/providers/documentation/kibana-provider) +- [Kubernetes](/providers/documentation/kubernetes-provider) +- [LibreNMS](/providers/documentation/libre_nms-provider) +- [Linear](/providers/documentation/linear_provider) +- [LinearB](/providers/documentation/linearb-provider) +- [LiteLLM](/providers/documentation/litellm-provider) +- [Llama.cpp](/providers/documentation/llamacpp-provider) +- 
[Mailgun](/providers/documentation/mailgun-provider) +- [Mattermost](/providers/documentation/mattermost-provider) +- [Microsoft Planner](/providers/documentation/planner-provider) +- [Monday](/providers/documentation/monday-provider) +- [MongoDB](/providers/documentation/mongodb-provider) +- [MySQL](/providers/documentation/mysql-provider) +- [NetBox](/providers/documentation/netbox-provider) +- [Netdata](/providers/documentation/netdata-provider) +- [New Relic](/providers/documentation/new-relic-provider) +- [Ntfy.sh](/providers/documentation/ntfy-provider) +- [Ollama](/providers/documentation/ollama-provider) +- [OpenAI](/providers/documentation/openai-provider) +- [OpenObserve](/providers/documentation/openobserve-provider) +- [OpenSearch Serverless](/providers/documentation/opensearchserverless-provider) +- [Openshift](/providers/documentation/openshift-provider) +- [Opsgenie](/providers/documentation/opsgenie-provider) +- [Pagerduty](/providers/documentation/pagerduty-provider) +- [Pagertree](/providers/documentation/pagertree-provider) +- [Parseable](/providers/documentation/parseable-provider) +- [Pingdom](/providers/documentation/pingdom-provider) +- [PostgreSQL](/providers/documentation/postgresql-provider) +- [PostHog](/providers/documentation/posthog-provider) +- [Prometheus](/providers/documentation/prometheus-provider) +- [Pushover](/providers/documentation/pushover-provider) +- [Python](/providers/documentation/python-provider) +- [QuickChart](/providers/documentation/quickchart-provider) +- [Redmine](/providers/documentation/redmine-provider) +- [Resend](/providers/documentation/resend-provider) +- [Rollbar](/providers/documentation/rollbar-provider) +- [AWS S3](/providers/documentation/s3-provider) +- [SendGrid](/providers/documentation/sendgrid-provider) +- [Sentry](/providers/documentation/sentry-provider) +- [Service Now](/providers/documentation/service-now-provider) +- [SignalFX](/providers/documentation/signalfx-provider) +- 
[SIGNL4](/providers/documentation/signl4-provider) +- [Site24x7](/providers/documentation/site24x7-provider) +- [Slack](/providers/documentation/slack-provider) +- [SMTP](/providers/documentation/smtp-provider) +- [Snowflake](/providers/documentation/snowflake-provider) +- [Splunk](/providers/documentation/splunk-provider) +- [Squadcast](/providers/documentation/squadcast-provider) +- [SSH](/providers/documentation/ssh-provider) +- [StatusCake](/providers/documentation/statuscake-provider) +- [SumoLogic](/providers/documentation/sumologic-provider) +- [Microsoft Teams](/providers/documentation/teams-provider) +- [Telegram](/providers/documentation/telegram-provider) +- [Template](/providers/documentation/template) +- [ThousandEyes](/providers/documentation/thousandeyes-provider) +- [Trello](/providers/documentation/trello-provider) +- [Twilio](/providers/documentation/twilio-provider) +- [UptimeKuma](/providers/documentation/uptimekuma-provider) +- [VictoriaLogs](/providers/documentation/victorialogs-provider) +- [Victoriametrics](/providers/documentation/victoriametrics-provider) +- [vLLM](/providers/documentation/vllm-provider) +- [Wazuh](/providers/documentation/wazuh-provider) +- [Webhook](/providers/documentation/webhook-provider) +- [Websocket](/providers/documentation/websocket-provider) +- [YouTrack](/providers/documentation/youtrack-provider) +- [Zabbix](/providers/documentation/zabbix-provider) +- [Zenduty](/providers/documentation/zenduty-provider) +- [Zoom](/providers/documentation/zoom-provider) +- [Zoom Chat](/providers/documentation/zoom_chat-provider) diff --git a/docs/providers/overview.mdx b/docs/providers/overview.mdx index 8e92349409..f9aae0cb3e 100644 --- a/docs/providers/overview.mdx +++ b/docs/providers/overview.mdx @@ -12,6 +12,14 @@ By leveraging Keep Providers, users are able to deeply integrate Keep with the t + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + } > + + } +> + + + } +> + + + } +> + + + } +> + + + } 
+> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + @@ -125,18 +301,18 @@ By leveraging Keep Providers, users are able to deeply integrate Keep with the t > } > + } > @@ -148,6 +324,46 @@ By leveraging Keep Providers, users are able to deeply integrate Keep with the t } >
+ + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + } +> + + } > + + } +> + + } +> + + } > + + } +> + + + } +> + + + } +> + + } +> + + + + } +> + + } > @@ -230,12 +510,20 @@ By leveraging Keep Providers, users are able to deeply integrate Keep with the t } > + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + } @@ -324,6 +660,22 @@ By leveraging Keep Providers, users are able to deeply integrate Keep with the t } > + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + + } +> + + } +> + + + } > @@ -460,6 +868,14 @@ By leveraging Keep Providers, users are able to deeply integrate Keep with the t } >
+ + } +> + + + } +> + + + } +> + + + } +> + + + } +> + +} +> + } + icon={ + + } +> + + } +> + + } > diff --git a/docs/providers/provider-methods.mdx b/docs/providers/provider-methods.mdx new file mode 100644 index 0000000000..f733cfb4af --- /dev/null +++ b/docs/providers/provider-methods.mdx @@ -0,0 +1,273 @@ +--- +title: "Provider methods" +sidebarTitle: "Provider Methods" +--- + +Provider methods are additional capabilities that providers expose beyond the basic `query` and `notify` capabilities ([read more here](/providers/adding-a-new-provider#basics)). These methods allow you to interact with the provider's API in more specific ways, enabling richer integrations and automation capabilities. + +## What are provider methods? + +Developers define provider methods using the `PROVIDER_METHODS` list in each provider class. They represent specific actions or queries that you can perform through the provider's API. These methods extend the basic capabilities of providers beyond simple notifications and queries. + + + + + +For example, a monitoring service provider might expose methods to: + +- Mute/unmute alerts +- Get detailed traces +- Search for specific metrics +- Modify monitoring configurations + +## Using provider methods + +You can access provider methods through: + +- Keep's platform interface via the alert action menu +- Keep's smart AI assistant (for example, "get traces for this alert") +- Keep's API +- Keep's workflows + +### Via UI + +Methods appear in the alert action menu when available for the alert's source provider: + + + + + + + The form is automatically populated with the parameters required by the + method, if they're available in the alert. + + +### Via AI assistant + +Keep's AI assistant can automatically discover and invoke provider methods based on natural language requests by understanding multiple contexts: + + + + + +1. 
**Alert Context**: The AI understands: + + - The alert's source provider + - Alert metadata and attributes + - Related services and applications + - Current alert status and severity + +2. **Provider Context**: The AI knows: + + - Which providers you have connected to your account + - Available methods for each provider + - Required parameters and their types + - Method descriptions and capabilities + +3. **Historical Context**: The AI learns from: + - Similar past incidents + - Previously successful method invocations + - Common patterns in alert resolution + +For example: + +```text +User: Can you get the traces for this alert? +Assistant: I see this alert came from Datadog. I'll use the Datadog provider's +get_traces method to fetch the traces. I'll use the trace_id from the alert's +metadata: abc-123... + +User: This alert seems related to high latency. Can you help investigate? +Assistant: I'll help investigate the latency issue. Since this is a Datadog alert, +I can: +1. Get recent traces using search_traces() to look for slow requests +2. Fetch metrics using get_metrics() to check system performance +3. Look for related logs using search_logs() + +Would you like me to start with any of these? +``` + +The AI assistant automatically: + +1. Identifies relevant provider methods +2. Extracts required parameters from context +3. Suggests appropriate actions based on the alert type +4. Chains multiple methods for comprehensive investigation + +### Via API + +```python +# Example using a Datadog provider method to mute a monitor +response = await api.post( + f"/providers/{provider_id}/invoke/mute_monitor", + {"monitor_id": "abc123", "duration": 3600} +) +``` + +## Adding new provider methods + +To add a new method to your provider: + +1. Define the method in your provider class (must be an instance method): + +```python +def get_traces(self, trace_id: str) -> dict: + """Get trace details from the provider. 
+ + Args: + trace_id (str): The ID of the trace to retrieve + + Returns: + dict: The trace details + """ + # Implementation + pass +``` + +2. Add method metadata to `PROVIDER_METHODS`: + +```python +from keep.providers.models.provider_method import ProviderMethod + +PROVIDER_METHODS = [ + ProviderMethod( + name="Get Traces", + description="Retrieve trace details", + func_name="get_traces", + type="view", # 'view' or 'action' + scopes=["traces:read"], # Required provider scopes + category="Observability", # Optional category for grouping methods + ) +] +``` + +Note: The `func_params` field is automatically populated by Keep through reflection of the method signature, so you don't need to define it manually. + + +Provider methods must be instance methods (not static or class methods) of the provider class. The method signature is automatically inspected to generate UI forms and parameter validation. + + +### Complete example + +Here's a complete example of a provider with custom methods: + +```python +class MonitoringProvider(BaseProvider): + PROVIDER_DISPLAY_NAME = "Monitoring Service" + + PROVIDER_METHODS = [ + ProviderMethod( + name="Mute Alert", + description="Mute an alert for a specified duration", + func_name="mute_alert", + type="action", + scopes=["alerts:write"], + category="Alert Management", + ), + ProviderMethod( + name="Get Metrics", + description="Retrieve metrics for a service", + func_name="get_metrics", + type="view", + scopes=["metrics:read"], + category="Observability", + ), + ] + + def mute_alert(self, alert_id: str, duration_minutes: int = 60) -> dict: + """ + Mute an alert for the specified duration. 
+ + Args: + alert_id: The ID of the alert to mute + duration_minutes: Duration to mute in minutes (default: 60) + + Returns: + dict: Confirmation of the mute action + """ + # Implementation here + response = self._api_call(f"/alerts/{alert_id}/mute", + {"duration": duration_minutes}) + return {"success": True, "muted_until": response["muted_until"]} + + def get_metrics(self, service_name: str, metric_type: str, + time_range: str = "1h") -> list: + """ + Get metrics for a specific service. + + Args: + service_name: Name of the service + metric_type: Type of metric (cpu, memory, latency, etc.) + time_range: Time range for metrics (default: "1h") + + Returns: + list: List of metric data points + """ + # Implementation here + return self._query(f"metrics.{metric_type}", + service=service_name, + range=time_range) +``` + +### Method types + +- **view**: Returns data for display (for example, getting traces, metrics) +- **action**: Performs an action (for example, muting an alert, creating a ticket) + +### Parameter types + +Supported parameter types for provider methods: + +- `str`: String input field +- `int`: Numeric input field +- `float`: Decimal number input field +- `bool`: Boolean checkbox +- `datetime`: Date/time picker +- `dict`: JSON object input +- `list`: Array/list input +- `Literal`: Dropdown with predefined values +- `Optional[type]`: Optional parameter of the specified type + +Example with different parameter types: + +```python +from typing import Optional, Literal +from datetime import datetime + +def advanced_query( + self, + metric_name: str, # Required string + time_range: Literal["1h", "6h", "24h", "7d"] = "1h", # Dropdown with options + include_metadata: bool = False, # Boolean checkbox + limit: Optional[int] = None, # Optional integer + start_time: Optional[datetime] = None, # Optional datetime picker +) -> dict: + """Query metrics with advanced filtering options.""" + # Implementation + pass +``` + +### Auto-discovery + +Keep automatically 
inspects provider classes to: + +1. Discover available methods +2. Extract parameter information +3. Generate UI components +4. Enable AI understanding of method capabilities + +## Best practices + +1. **Clear Documentation**: Provide detailed docstrings for methods +2. **Type Hints**: Use Python type hints for parameters +3. **Error Handling**: Return clear error messages +4. **Scopes**: Define minimum required scopes +5. **Validation**: Validate parameters before execution + +## Limitations + +- Currently supports only synchronous methods +- The supported parameter types are limited to basic types +- Methods must be instance methods of the provider class +- Methods are automatically discovered through reflection +- Keep validates parameter types based on type hints diff --git a/docs/providers/what-is-a-provider.mdx b/docs/providers/what-is-a-provider.mdx deleted file mode 100644 index 69a4045800..0000000000 --- a/docs/providers/what-is-a-provider.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "❓ What is a Provider" -sidebarTitle: "What is a Provider?" -description: "A Provider is a component of Keep that enables it to interact with third-party products. It is implemented as extensible Python code, making it easy to enhance and customize." ---- - -Providers are core components of Keep that allow Keep to either query data or send notifications to products such as Datadog, Cloudwatch, and Sentry for data querying, and Slack, Email, and PagerDuty for sending notifications about alerts. - -By leveraging Keep Providers, developers are able to integrate Keep with the tools they use and trust, providing them with a flexible and powerful way to manage their alerts. 
- -![](/images/providers.png) diff --git a/docs/snippets/providers/airflow-snippet-autogenerated.mdx b/docs/snippets/providers/airflow-snippet-autogenerated.mdx new file mode 100644 index 0000000000..8e1275f2d6 --- /dev/null +++ b/docs/snippets/providers/airflow-snippet-autogenerated.mdx @@ -0,0 +1,9 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/aks-snippet-autogenerated.mdx b/docs/snippets/providers/aks-snippet-autogenerated.mdx new file mode 100644 index 0000000000..72c7a289a2 --- /dev/null +++ b/docs/snippets/providers/aks-snippet-autogenerated.mdx @@ -0,0 +1,34 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **subscription_id**: The azure subscription id (required: True, sensitive: True) +- **client_id**: The azure client id (required: True, sensitive: True) +- **client_secret**: The azure client secret (required: True, sensitive: True) +- **tenant_id**: The azure tenant id (required: True, sensitive: True) +- **resource_group_name**: The azure aks resource group name (required: True, sensitive: True) +- **resource_name**: The azure aks cluster name (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query aks + provider: aks + config: "{{ provider.my_provider_name }}" + with: + command_type: {value} # The command type to operate on the k8s cluster (`get_pods`, `get_pvc`, `get_node_pressure`). 
+``` + + + + + +Check the following workflow example: +- [aks_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/aks_basic.yml) diff --git a/docs/snippets/providers/amazonsqs-snippet-autogenerated.mdx b/docs/snippets/providers/amazonsqs-snippet-autogenerated.mdx new file mode 100644 index 0000000000..d5ebffdc6b --- /dev/null +++ b/docs/snippets/providers/amazonsqs-snippet-autogenerated.mdx @@ -0,0 +1,38 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **region_name**: Region name (required: True, sensitive: False) +- **sqs_queue_url**: SQS Queue URL (required: True, sensitive: False) +- **access_key_id**: Access Key Id (Leave empty if using IAM role at EC2) (required: False, sensitive: False) +- **secret_access_key**: Secret access key (Leave empty if using IAM role at EC2) (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: Key-Id pair is valid and working (mandatory) +- **sqs::read**: Required privileges to receive alerts from SQS. If you only want to give read scope to your key-secret pair, use the permission policy: AmazonSQSReadOnlyAccess. (mandatory) +- **sqs::write**: Required privileges to push messages to SQS. If you only want to give read & write scope to your key-secret pair, use the permission policy: AmazonSQSFullAccess. + + + +## In workflows + +This provider can be used in workflows. 
+ + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query amazonsqs + provider: amazonsqs + config: "{{ provider.my_provider_name }}" + with: + message: {value} + group_id: {value} + dedup_id: {value} +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/anthropic-snippet-autogenerated.mdx b/docs/snippets/providers/anthropic-snippet-autogenerated.mdx new file mode 100644 index 0000000000..297b766ecc --- /dev/null +++ b/docs/snippets/providers/anthropic-snippet-autogenerated.mdx @@ -0,0 +1,30 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Anthropic API Key (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query anthropic + provider: anthropic + config: "{{ provider.my_provider_name }}" + with: + prompt: {value} # The prompt to query the model with. + model: {value} # The model to query. + max_tokens: {value} # The maximum number of tokens to generate. + structured_output_format: {value} # The structured output format to use. +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). 
diff --git a/docs/snippets/providers/appdynamics-snippet-autogenerated.mdx b/docs/snippets/providers/appdynamics-snippet-autogenerated.mdx new file mode 100644 index 0000000000..7adf61f345 --- /dev/null +++ b/docs/snippets/providers/appdynamics-snippet-autogenerated.mdx @@ -0,0 +1,23 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **appDynamicsAccountName**: AppDynamics Account Name (required: True, sensitive: False) +- **appId**: AppDynamics appId (required: True, sensitive: False) +- **host**: AppDynamics host (required: True, sensitive: False) +- **appDynamicsAccessToken**: AppDynamics Access Token (required: False, sensitive: False) +- **appDynamicsUsername**: Username (required: False, sensitive: False) +- **appDynamicsPassword**: Password (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is Authorized (mandatory) +- **administrator**: Administrator privileges (mandatory) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/argocd-snippet-autogenerated.mdx b/docs/snippets/providers/argocd-snippet-autogenerated.mdx new file mode 100644 index 0000000000..ec50191441 --- /dev/null +++ b/docs/snippets/providers/argocd-snippet-autogenerated.mdx @@ -0,0 +1,24 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **argocd_access_token**: Argocd Access Token (required: True, sensitive: True) +- **deployment_url**: Deployment Url (required: True, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is Authorized (mandatory) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + + + +## Topology +This provider pulls [topology](/overview/servicetopology) to Keep. It could be used in [correlations](/overview/correlation-topology) +and [mapping](/overview/enrichment/mapping#mapping-with-topology-data), and as a context +for [alerts](/alerts/sidebar#7-alert-topology-view) and [incidents](/overview#17-incident-topology). \ No newline at end of file diff --git a/docs/snippets/providers/asana-snippet-autogenerated.mdx b/docs/snippets/providers/asana-snippet-autogenerated.mdx new file mode 100644 index 0000000000..0fe36e4d1b --- /dev/null +++ b/docs/snippets/providers/asana-snippet-autogenerated.mdx @@ -0,0 +1,47 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **pat_token**: Personal Access Token for Asana. (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is authenticated to Asana. (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query asana + provider: asana + config: "{{ provider.my_provider_name }}" + with: + task_id: {value} # Task ID. 
+ # Apart from the above parameters, you can also provide few other parameters. Refer to the [Asana API documentation](https://developers.asana.com/docs/update-a-task) for more details. +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query asana + provider: asana + config: "{{ provider.my_provider_name }}" + with: + name: {value} # Task Name. + projects: {value} # List of Project IDs. + # Apart from the above parameters, you can also provide few other parameters. Refer to the [Asana API documentation](https://developers.asana.com/docs/update-a-task) for more details. +``` + + + + +Check the following workflow examples: +- [create-task-in-asana.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/create-task-in-asana.yaml) +- [update-task-in-asana.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/update-task-in-asana.yaml) diff --git a/docs/snippets/providers/auth0-snippet-autogenerated.mdx b/docs/snippets/providers/auth0-snippet-autogenerated.mdx new file mode 100644 index 0000000000..7d746bfc43 --- /dev/null +++ b/docs/snippets/providers/auth0-snippet-autogenerated.mdx @@ -0,0 +1,31 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **domain**: Auth0 Domain (required: True, sensitive: False) +- **token**: Auth0 API Token (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query auth0 + provider: auth0 + config: "{{ provider.my_provider_name }}" + with: + log_type: {value} + previous_users: {value} +``` + + + + + +Check the following workflow example: +- [new-auth0-users-monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/new-auth0-users-monitor.yml) diff --git a/docs/snippets/providers/axiom-snippet-autogenerated.mdx b/docs/snippets/providers/axiom-snippet-autogenerated.mdx new file mode 100644 index 0000000000..f7eeb67909 --- /dev/null +++ b/docs/snippets/providers/axiom-snippet-autogenerated.mdx @@ -0,0 +1,33 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_token**: Axiom API Token (required: True, sensitive: True) +- **organization_id**: Axiom Organization ID (required: False, sensitive: False) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query axiom + provider: axiom + config: "{{ provider.my_provider_name }}" + with: + dataset: {value} + datasets_api_url: {value} + organization_id: {value} + startTime: {value} + endTime: {value} + query: {value} # command to execute +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). 
diff --git a/docs/snippets/providers/azuremonitoring-snippet-autogenerated.mdx b/docs/snippets/providers/azuremonitoring-snippet-autogenerated.mdx new file mode 100644 index 0000000000..0fd8279afc --- /dev/null +++ b/docs/snippets/providers/azuremonitoring-snippet-autogenerated.mdx @@ -0,0 +1,26 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + + +## Connecting via Webhook (omnidirectional) +This provider supports webhooks. + + +To send alerts from Azure Monitor to Keep, Use the following webhook url to configure Azure Monitor send alerts to Keep: + +1. In Azure Monitor, create a new Action Group. +2. In the Action Group, add a new action of type "Webhook". +3. In the Webhook action, configure the webhook with the following settings. +- **Name**: keep-azuremonitoring-webhook-integration +- **URL**: Your Keep Backend URL +4. Save the Action Group. +5. In the Alert Rule, configure the Action Group to use the Action Group created in step 1. +6. Save the Alert Rule. +7. Test the Alert Rule to ensure that the alerts are being sent to Keep. + diff --git a/docs/snippets/providers/base-snippet-autogenerated.mdx b/docs/snippets/providers/base-snippet-autogenerated.mdx new file mode 100644 index 0000000000..74b7d27f0c --- /dev/null +++ b/docs/snippets/providers/base-snippet-autogenerated.mdx @@ -0,0 +1,56 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query base + provider: base + config: "{{ provider.my_provider_name }}" + with: + kwargs: {value} # The provider context (with statement) +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query base + provider: base + config: "{{ provider.my_provider_name }}" + with: + # The provider context (with statement) +``` + + + + +Check the following workflow examples: +- [change.yml](https://github.com/keephq/keep/blob/main/examples/workflows/change.yml) +- [conditionally_run_if_ai_says_so.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/conditionally_run_if_ai_says_so.yaml) +- [consts_and_vars.yml](https://github.com/keephq/keep/blob/main/examples/workflows/consts_and_vars.yml) +- [create_alert_from_vm_metric.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_alert_from_vm_metric.yml) +- [create_alerts_from_mysql.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_alerts_from_mysql.yml) +- [create_multi_alert_from_vm_metric.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_multi_alert_from_vm_metric.yml) +- [db_disk_space_monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/db_disk_space_monitor.yml) +- [disk_grown_defects_rule.yml](https://github.com/keephq/keep/blob/main/examples/workflows/disk_grown_defects_rule.yml) +- [elastic_enrich_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/elastic_enrich_example.yml) +- [ifelse.yml](https://github.com/keephq/keep/blob/main/examples/workflows/ifelse.yml) +- [incident-tier-escalation.yml](https://github.com/keephq/keep/blob/main/examples/workflows/incident-tier-escalation.yml) +- [openshift_pod_restart.yml](https://github.com/keephq/keep/blob/main/examples/workflows/openshift_pod_restart.yml) +- 
[query_victoriametrics.yml](https://github.com/keephq/keep/blob/main/examples/workflows/query_victoriametrics.yml) +- [raw_sql_query_datetime.yml](https://github.com/keephq/keep/blob/main/examples/workflows/raw_sql_query_datetime.yml) +- [webhook_example_foreach.yml](https://github.com/keephq/keep/blob/main/examples/workflows/webhook_example_foreach.yml) +- [workflow_start_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/workflow_start_example.yml) + + +## Topology +This provider pulls [topology](/overview/servicetopology) to Keep. It could be used in [correlations](/overview/correlation-topology) +and [mapping](/overview/enrichment/mapping#mapping-with-topology-data), and as a context +for [alerts](/alerts/sidebar#7-alert-topology-view) and [incidents](/overview#17-incident-topology). \ No newline at end of file diff --git a/docs/snippets/providers/bash-snippet-autogenerated.mdx b/docs/snippets/providers/bash-snippet-autogenerated.mdx new file mode 100644 index 0000000000..bab1a04809 --- /dev/null +++ b/docs/snippets/providers/bash-snippet-autogenerated.mdx @@ -0,0 +1,27 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query bash + provider: bash + config: "{{ provider.my_provider_name }}" + with: + timeout: {value} + command: {value} + shell: {value} +``` + + + + + +Check the following workflow example: +- [bash_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/bash_example.yml) diff --git a/docs/snippets/providers/bigquery-snippet-autogenerated.mdx b/docs/snippets/providers/bigquery-snippet-autogenerated.mdx new file mode 100644 index 0000000000..1afc701709 --- /dev/null +++ b/docs/snippets/providers/bigquery-snippet-autogenerated.mdx @@ -0,0 +1,31 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **service_account_json**: The service account JSON with container.viewer role (required: True, sensitive: True) +- **project_id**: Google Cloud project ID. If not provided, it will try to fetch it from the environment variable 'GOOGLE_CLOUD_PROJECT' (required: False, sensitive: False) + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query bigquery + provider: bigquery + config: "{{ provider.my_provider_name }}" + with: + query: {value} +``` + + + + + +Check the following workflow examples: +- [bigquery.yml](https://github.com/keephq/keep/blob/main/examples/workflows/bigquery.yml) +- [failed-to-login-workflow.yml](https://github.com/keephq/keep/blob/main/examples/workflows/failed-to-login-workflow.yml) diff --git a/docs/snippets/providers/centreon-snippet-autogenerated.mdx b/docs/snippets/providers/centreon-snippet-autogenerated.mdx new file mode 100644 index 0000000000..6684090687 --- /dev/null +++ b/docs/snippets/providers/centreon-snippet-autogenerated.mdx @@ -0,0 +1,18 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host_url**: Centreon Host URL (required: True, sensitive: False) +- **api_token**: Centreon API Token (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is authenticated + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/checkly-snippet-autogenerated.mdx b/docs/snippets/providers/checkly-snippet-autogenerated.mdx new file mode 100644 index 0000000000..35c85662a9 --- /dev/null +++ b/docs/snippets/providers/checkly-snippet-autogenerated.mdx @@ -0,0 +1,18 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **checklyApiKey**: Checkly API Key (required: True, sensitive: True) +- **accountId**: Checkly Account ID (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **read_alerts**: Read alerts from Checkly + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/checkmk-snippet-autogenerated.mdx b/docs/snippets/providers/checkmk-snippet-autogenerated.mdx new file mode 100644 index 0000000000..8e1275f2d6 --- /dev/null +++ b/docs/snippets/providers/checkmk-snippet-autogenerated.mdx @@ -0,0 +1,9 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/cilium-snippet-autogenerated.mdx b/docs/snippets/providers/cilium-snippet-autogenerated.mdx new file mode 100644 index 0000000000..79376178e5 --- /dev/null +++ b/docs/snippets/providers/cilium-snippet-autogenerated.mdx @@ -0,0 +1,19 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **cilium_base_endpoint**: The base endpoint of the cilium hubble relay (required: True, sensitive: False) + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. 
If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + + + +## Topology +This provider pulls [topology](/overview/servicetopology) to Keep. It could be used in [correlations](/overview/correlation-topology) +and [mapping](/overview/enrichment/mapping#mapping-with-topology-data), and as a context +for [alerts](/alerts/sidebar#7-alert-topology-view) and [incidents](/overview#17-incident-topology). \ No newline at end of file diff --git a/docs/snippets/providers/clickhouse-snippet-autogenerated.mdx b/docs/snippets/providers/clickhouse-snippet-autogenerated.mdx new file mode 100644 index 0000000000..7657e38a77 --- /dev/null +++ b/docs/snippets/providers/clickhouse-snippet-autogenerated.mdx @@ -0,0 +1,52 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **username**: Clickhouse username (required: True, sensitive: False) +- **password**: Clickhouse password (required: True, sensitive: True) +- **host**: Clickhouse hostname (required: True, sensitive: False) +- **port**: Clickhouse port (required: True, sensitive: False) +- **database**: Clickhouse database name (required: False, sensitive: False) +- **protocol**: Protocol ('clickhouses' for SSL, 'clickhouse' for no SSL, 'http' or 'https') (required: True, sensitive: False) +- **verify**: Enable SSL verification (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **connect_to_server**: The user can connect to the server (mandatory) + + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query clickhouse + provider: clickhouse + config: "{{ provider.my_provider_name }}" + with: + query: {value} + single_row: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query clickhouse + provider: clickhouse + config: "{{ provider.my_provider_name }}" + with: + query: {value} + single_row: {value} +``` + + + + +Check the following workflow examples: +- [clickhouse_multiquery.yml](https://github.com/keephq/keep/blob/main/examples/workflows/clickhouse_multiquery.yml) +- [query_clickhouse.yml](https://github.com/keephq/keep/blob/main/examples/workflows/query_clickhouse.yml) diff --git a/docs/snippets/providers/cloudwatch-snippet-autogenerated.mdx b/docs/snippets/providers/cloudwatch-snippet-autogenerated.mdx new file mode 100644 index 0000000000..a9a314eacc --- /dev/null +++ b/docs/snippets/providers/cloudwatch-snippet-autogenerated.mdx @@ -0,0 +1,50 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **region**: AWS region (required: True, sensitive: False) +- **access_key**: AWS access key (Leave empty if using IAM role at EC2) (required: False, sensitive: True) +- **access_key_secret**: AWS access key secret (Leave empty if using IAM role at EC2) (required: False, sensitive: True) +- **session_token**: AWS Session Token (required: False, sensitive: True) +- **cloudwatch_sns_topic**: AWS Cloudwatch SNS Topic [ARN or name] (required: False, sensitive: False) +- **protocol**: Protocol to use for the webhook (required: True, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **cloudwatch:DescribeAlarms**: Required to retrieve information about alarms. 
(mandatory) ([Documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html)) +- **cloudwatch:PutMetricAlarm**: Required to update information about alarms. This is mainly used to add Keep as an SNS action to the alarm. ([Documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricAlarm.html)) +- **sns:ListSubscriptionsByTopic**: Required to list all subscriptions of a topic, so Keep will be able to add itself as a subscription. ([Documentation](https://docs.aws.amazon.com/sns/latest/dg/sns-access-policy-language-api-permissions-reference.html)) +- **logs:GetQueryResults**: Part of CloudWatchLogsReadOnlyAccess role. Required to retrieve the results of CloudWatch Logs Insights queries. ([Documentation](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html)) +- **logs:DescribeQueries**: Part of CloudWatchLogsReadOnlyAccess role. Required to describe the results of CloudWatch Logs Insights queries. ([Documentation](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueries.html)) +- **logs:StartQuery**: Part of CloudWatchLogsReadOnlyAccess role. Required to start CloudWatch Logs Insights queries. ([Documentation](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html)) +- **iam:SimulatePrincipalPolicy**: Allow Keep to test the scopes of the current user/role without modifying any resource. ([Documentation](https://docs.aws.amazon.com/IAM/latest/APIReference/API_SimulatePrincipalPolicy.html)) + + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query cloudwatch + provider: cloudwatch + config: "{{ provider.my_provider_name }}" + with: + log_group: {value} + log_groups: {value} + remove_ptr_from_results: {value} + query: {value} + hours: {value} +``` + + + + + +Check the following workflow examples: +- [retrieve_cloudwatch_logs.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/retrieve_cloudwatch_logs.yaml) +- [slack_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack_basic.yml) +- [slack_basic_cel.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack_basic_cel.yml) diff --git a/docs/snippets/providers/console-snippet-autogenerated.mdx b/docs/snippets/providers/console-snippet-autogenerated.mdx new file mode 100644 index 0000000000..b0e546936f --- /dev/null +++ b/docs/snippets/providers/console-snippet-autogenerated.mdx @@ -0,0 +1,60 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query console + provider: console + config: "{{ provider.my_provider_name }}" + with: + message: {value} + logger: {value} + severity: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query console + provider: console + config: "{{ provider.my_provider_name }}" + with: + message: {value} # The message to be printed in to the console + logger: {value} # Whether to use the logger or not + severity: {value} # The severity of the message if logger is True +``` + + + + +Check the following workflow examples: +- [aks_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/aks_basic.yml) +- [change.yml](https://github.com/keephq/keep/blob/main/examples/workflows/change.yml) +- [complex-conditions-cel.yml](https://github.com/keephq/keep/blob/main/examples/workflows/complex-conditions-cel.yml) +- [console_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/console_example.yml) +- [consts_and_dict.yml](https://github.com/keephq/keep/blob/main/examples/workflows/consts_and_dict.yml) +- [eks_advanced.yml](https://github.com/keephq/keep/blob/main/examples/workflows/eks_advanced.yml) +- [eks_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/eks_basic.yml) +- [fluxcd_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/fluxcd_example.yml) +- [gke.yml](https://github.com/keephq/keep/blob/main/examples/workflows/gke.yml) +- [ifelse.yml](https://github.com/keephq/keep/blob/main/examples/workflows/ifelse.yml) +- [incident-enrich.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/incident-enrich.yaml) +- [incident_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/incident_example.yml) +- [inputs_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/inputs_example.yml) +- 
[multi-condition-cel.yml](https://github.com/keephq/keep/blob/main/examples/workflows/multi-condition-cel.yml) +- [mustache-paths-example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/mustache-paths-example.yml) +- [openshift_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/openshift_basic.yml) +- [openshift_monitoring_and_remediation.yml](https://github.com/keephq/keep/blob/main/examples/workflows/openshift_monitoring_and_remediation.yml) +- [openshift_pod_restart.yml](https://github.com/keephq/keep/blob/main/examples/workflows/openshift_pod_restart.yml) +- [pattern-matching-cel.yml](https://github.com/keephq/keep/blob/main/examples/workflows/pattern-matching-cel.yml) +- [severity_changed.yml](https://github.com/keephq/keep/blob/main/examples/workflows/severity_changed.yml) +- [webhook_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/webhook_example.yml) +- [webhook_example_foreach.yml](https://github.com/keephq/keep/blob/main/examples/workflows/webhook_example_foreach.yml) diff --git a/docs/snippets/providers/coralogix-snippet-autogenerated.mdx b/docs/snippets/providers/coralogix-snippet-autogenerated.mdx new file mode 100644 index 0000000000..8e1275f2d6 --- /dev/null +++ b/docs/snippets/providers/coralogix-snippet-autogenerated.mdx @@ -0,0 +1,9 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). 
+ + diff --git a/docs/snippets/providers/dash0-snippet-autogenerated.mdx b/docs/snippets/providers/dash0-snippet-autogenerated.mdx new file mode 100644 index 0000000000..8e1275f2d6 --- /dev/null +++ b/docs/snippets/providers/dash0-snippet-autogenerated.mdx @@ -0,0 +1,9 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/databend-snippet-autogenerated.mdx b/docs/snippets/providers/databend-snippet-autogenerated.mdx new file mode 100644 index 0000000000..8db5cc9986 --- /dev/null +++ b/docs/snippets/providers/databend-snippet-autogenerated.mdx @@ -0,0 +1,35 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host_url**: Databend host_url (required: True, sensitive: False) +- **username**: Databend username (required: True, sensitive: False) +- **password**: Databend password (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **connect_to_server**: The user can connect to the server (mandatory) + + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query databend + provider: databend + config: "{{ provider.my_provider_name }}" + with: + query: {value} +``` + + + + + +Check the following workflow example: +- [query-databend.yml](https://github.com/keephq/keep/blob/main/examples/workflows/query-databend.yml) diff --git a/docs/snippets/providers/datadog-snippet-autogenerated.mdx b/docs/snippets/providers/datadog-snippet-autogenerated.mdx new file mode 100644 index 0000000000..bbd90d10ce --- /dev/null +++ b/docs/snippets/providers/datadog-snippet-autogenerated.mdx @@ -0,0 +1,73 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Datadog Api Key (required: True, sensitive: True) +- **app_key**: Datadog App Key (required: True, sensitive: True) +- **domain**: Datadog API domain (required: False, sensitive: False) +- **environment**: Topology environment name (required: False, sensitive: False) +- **oauth_token**: For OAuth flow (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **events_read**: Read events data. (mandatory) +- **monitors_read**: Read monitors (mandatory) ([Documentation](https://docs.datadoghq.com/account_management/rbac/permissions/#monitors)) +- **monitors_write**: Write monitors ([Documentation](https://docs.datadoghq.com/account_management/rbac/permissions/#monitors)) +- **create_webhooks**: Create webhooks integrations +- **metrics_read**: View custom metrics. +- **logs_read**: Read log data. +- **apm_read**: Read APM data for Topology creation. +- **apm_service_catalog_read**: Read APM service catalog for Topology creation. + + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query datadog + provider: datadog + config: "{{ provider.my_provider_name }}" + with: + query: {value} + timeframe: {value} + query_type: {value} +``` + + + + + +Check the following workflow examples: +- [complex-conditions-cel.yml](https://github.com/keephq/keep/blob/main/examples/workflows/complex-conditions-cel.yml) +- [datadog-log-monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/datadog-log-monitor.yml) +- [db_disk_space_monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/db_disk_space_monitor.yml) +- [service-error-rate-monitor-datadog.yml](https://github.com/keephq/keep/blob/main/examples/workflows/service-error-rate-monitor-datadog.yml) + + +## Topology +This provider pulls [topology](/overview/servicetopology) to Keep. It could be used in [correlations](/overview/correlation-topology) +and [mapping](/overview/enrichment/mapping#mapping-with-topology-data), and as a context +for [alerts](/alerts/sidebar#7-alert-topology-view) and [incidents](/overview#17-incident-topology). + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). 
+ +- **mute_monitor** Mute a monitor (action, scopes: monitors_write) + +- **unmute_monitor** Unmute a monitor (action, scopes: monitors_write) + +- **get_monitor_events** Get all events related to this monitor (view, scopes: events_read) + +- **get_trace** Get trace by ID (view, scopes: apm_read) + +- **create_incident** Create an incident (action, scopes: incidents_write) + +- **resolve_incident** Resolve an active incident (action, scopes: incidents_write) + +- **add_incident_timeline_note** Add a note to an incident timeline (action, scopes: incidents_write) + diff --git a/docs/snippets/providers/deepseek-snippet-autogenerated.mdx b/docs/snippets/providers/deepseek-snippet-autogenerated.mdx new file mode 100644 index 0000000000..66dd386fc9 --- /dev/null +++ b/docs/snippets/providers/deepseek-snippet-autogenerated.mdx @@ -0,0 +1,33 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: DeepSeek API Key (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query deepseek + provider: deepseek + config: "{{ provider.my_provider_name }}" + with: + prompt: {value} # The user query. + model: {value} # The model to use for the query. + max_tokens: {value} # The maximum number of tokens to generate. + system_prompt: {value} # The system prompt to use. + structured_output_format: {value} # The structured output format. 
+``` + + + + + +Check the following workflow example: +- [enrich_using_structured_output_from_deepseek.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/enrich_using_structured_output_from_deepseek.yaml) diff --git a/docs/snippets/providers/discord-snippet-autogenerated.mdx b/docs/snippets/providers/discord-snippet-autogenerated.mdx new file mode 100644 index 0000000000..c38efe0f9e --- /dev/null +++ b/docs/snippets/providers/discord-snippet-autogenerated.mdx @@ -0,0 +1,30 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **webhook_url**: Discord Webhook Url (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query discord + provider: discord + config: "{{ provider.my_provider_name }}" + with: + content: {value} # The content of the message. + components: {value} # The components of the message. +``` + + + + +Check the following workflow example: +- [discord_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/discord_basic.yml) diff --git a/docs/snippets/providers/dynatrace-snippet-autogenerated.mdx b/docs/snippets/providers/dynatrace-snippet-autogenerated.mdx new file mode 100644 index 0000000000..1212b68921 --- /dev/null +++ b/docs/snippets/providers/dynatrace-snippet-autogenerated.mdx @@ -0,0 +1,21 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **environment_id**: Dynatrace's environment ID (required: True, sensitive: False) +- **api_token**: Dynatrace's API token (required: True, sensitive: True) +- **alerting_profile**: Dynatrace's alerting profile for the webhook integration. Defaults to 'Default' (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **problems.read**: Read access to Dynatrace problems (mandatory) +- **settings.read**: Read access to Dynatrace settings [for webhook installation] +- **settings.write**: Write access to Dynatrace settings [for webhook installation] + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/eks-snippet-autogenerated.mdx b/docs/snippets/providers/eks-snippet-autogenerated.mdx new file mode 100644 index 0000000000..028d04048d --- /dev/null +++ b/docs/snippets/providers/eks-snippet-autogenerated.mdx @@ -0,0 +1,82 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **region**: AWS region where the EKS cluster is located (required: True, sensitive: False) +- **cluster_name**: Name of the EKS cluster (required: True, sensitive: False) +- **access_key**: AWS access key (Leave empty if using IAM role at EC2) (required: False, sensitive: True) +- **secret_access_key**: AWS secret access key (Leave empty if using IAM role at EC2) (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **eks:DescribeCluster**: Required to get cluster information (mandatory) ([Documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_DescribeCluster.html)) +- **eks:ListClusters**: Required to list available clusters (mandatory) ([Documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_ListClusters.html)) +- **pods:delete**: Required to delete/restart pods ([Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) +- **deployments:scale**: Required to scale deployments ([Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) +- **pods:list**: Required to list pods ([Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) +- **pods:get**: Required to get pod details ([Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) +- **pods:logs**: Required to get pod logs ([Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query eks + provider: eks + config: "{{ provider.my_provider_name }}" + with: + command_type: {value} # Type of query to execute + # Additional arguments for the query +``` + + + + + +Check the following workflow examples: +- [eks_advanced.yml](https://github.com/keephq/keep/blob/main/examples/workflows/eks_advanced.yml) +- [eks_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/eks_basic.yml) + + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). + +- **get_pods** List all pods in a namespace or across all namespaces (view, scopes: pods:list, pods:get) + + - `namespace`: The namespace to list pods from. If None, lists pods from all namespaces. 
+- **get_pvc** List all PVCs in a namespace or across all namespaces (view, scopes: pods:list) + + - `namespace`: The namespace to list pods from. If None, lists pods from all namespaces. +- **get_node_pressure** Get pressure metrics for all nodes (view, scopes: pods:list) + +- **exec_command** Execute a command in a pod (action, scopes: pods:exec) + + - `namespace`: Namespace of the pod + - `pod_name`: Name of the pod + - `command`: Command to execute (string or array) + - `container`: Name of the container (optional, defaults to first container) +- **restart_pod** Restart a pod by deleting it (action, scopes: pods:delete) + + - `namespace`: Namespace of the pod + - `pod_name`: Name of the pod +- **get_deployment** Get deployment information (view, scopes: pods:list) + + - `deployment_name`: Name of the deployment to get + - `namespace`: Target namespace (defaults to "default") +- **scale_deployment** Scale a deployment to specified replicas (action, scopes: deployments:scale) + + - `deployment_name`: Name of the deployment to scale + - `namespace`: Target namespace (defaults to "default") + - `replicas`: Number of replicas to scale to +- **get_pod_logs** Get logs from a pod (view, scopes: pods:logs) + + - `namespace`: Namespace of the pod + - `pod_name`: Name of the pod + - `container`: Name of the container (optional) + - `tail_lines`: Number of lines to fetch from the end of logs (default: 100) diff --git a/docs/snippets/providers/elastic-snippet-autogenerated.mdx b/docs/snippets/providers/elastic-snippet-autogenerated.mdx new file mode 100644 index 0000000000..2d53471982 --- /dev/null +++ b/docs/snippets/providers/elastic-snippet-autogenerated.mdx @@ -0,0 +1,41 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **host**: Elasticsearch host (required: False, sensitive: False) +- **cloud_id**: Elasticsearch cloud id (required: False, sensitive: False) +- **verify**: Enable SSL verification (required: False, sensitive: False) +- **api_key**: Elasticsearch API Key (required: False, sensitive: True) +- **username**: Elasticsearch username (required: False, sensitive: False) +- **password**: Elasticsearch password (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **connect_to_server**: The user can connect to the server (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query elastic + provider: elastic + config: "{{ provider.my_provider_name }}" + with: + query: {value} # The body of the query + index: {value} # The index to search in +``` + + + + + +Check the following workflow examples: +- [create_alerts_from_elastic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_alerts_from_elastic.yml) +- [elastic_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/elastic_basic.yml) +- [elastic_enrich_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/elastic_enrich_example.yml) diff --git a/docs/snippets/providers/flashduty-snippet-autogenerated.mdx b/docs/snippets/providers/flashduty-snippet-autogenerated.mdx new file mode 100644 index 0000000000..af71477809 --- /dev/null +++ b/docs/snippets/providers/flashduty-snippet-autogenerated.mdx @@ -0,0 +1,33 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **integration_key**: Flashduty integration key (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query flashduty + provider: flashduty + config: "{{ provider.my_provider_name }}" + with: + title: {value} # The title of the incident + event_status: {value} # The status of the incident, one of: Info, Warning, Critical, Ok + description: {value} # The description of the incident + alert_key: {value} # Alert identifier, used to update or automatically recover existing alerts. If you're reporting a recovery event, this value must exist. + labels: {value} # The labels of the incident +``` + + + + +Check the following workflow example: +- [flashduty_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/flashduty_example.yml) diff --git a/docs/snippets/providers/fluxcd-snippet-autogenerated.mdx b/docs/snippets/providers/fluxcd-snippet-autogenerated.mdx new file mode 100644 index 0000000000..e6275b36fe --- /dev/null +++ b/docs/snippets/providers/fluxcd-snippet-autogenerated.mdx @@ -0,0 +1,62 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **kubeconfig**: Kubeconfig file content (required: False, sensitive: True) +- **context**: Kubernetes context to use (required: False, sensitive: False) +- **namespace**: Namespace where Flux CD is installed (required: False, sensitive: False) +- **api_server**: Kubernetes API server URL (required: False, sensitive: False) +- **token**: Kubernetes API token (required: False, sensitive: True) +- **insecure**: Skip TLS verification (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is Authorized (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query fluxcd + provider: fluxcd + config: "{{ provider.my_provider_name }}" + with: + **_: {value} # Additional arguments (ignored) +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query fluxcd + provider: fluxcd + config: "{{ provider.my_provider_name }}" + with: + action: {value} # The action to perform. Supported actions are: +- reconcile: Trigger a reconciliation for a FluxCD resource. + # Additional arguments for the action. +``` + + + + +Check the following workflow example: +- [fluxcd_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/fluxcd_example.yml) + + +## Topology +This provider pulls [topology](/overview/servicetopology) to Keep. It could be used in [correlations](/overview/correlation-topology) +and [mapping](/overview/enrichment/mapping#mapping-with-topology-data), and as a context +for [alerts](/alerts/sidebar#7-alert-topology-view) and [incidents](/overview#17-incident-topology). + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). 
+ +- **get_fluxcd_resources** Get resources from Flux CD (scopes: no additional scopes) + diff --git a/docs/snippets/providers/gcpmonitoring-snippet-autogenerated.mdx b/docs/snippets/providers/gcpmonitoring-snippet-autogenerated.mdx new file mode 100644 index 0000000000..9d7784bf2f --- /dev/null +++ b/docs/snippets/providers/gcpmonitoring-snippet-autogenerated.mdx @@ -0,0 +1,45 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **service_account_json**: A service account JSON with logging viewer role (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **roles/logs.viewer**: Read access to GCP logging (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query gcpmonitoring + provider: gcpmonitoring + config: "{{ provider.my_provider_name }}" + with: + filter: {value} + timedelta_in_days: {value} + page_size: {value} + raw: {value} + project: {value} +``` + + + + + +Check the following workflow examples: +- [gcp_logging_open_ai.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/gcp_logging_open_ai.yaml) +- [slack-message-reaction.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack-message-reaction.yml) + + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). 
+ +- **execute_query** Query the GCP logs (view, scopes: no additional scopes) + diff --git a/docs/snippets/providers/gemini-snippet-autogenerated.mdx b/docs/snippets/providers/gemini-snippet-autogenerated.mdx new file mode 100644 index 0000000000..0f46c6b527 --- /dev/null +++ b/docs/snippets/providers/gemini-snippet-autogenerated.mdx @@ -0,0 +1,30 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Google AI API Key (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query gemini + provider: gemini + config: "{{ provider.my_provider_name }}" + with: + prompt: {value} + model: {value} + max_tokens: {value} + structured_output_format: {value} +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/github-snippet-autogenerated.mdx b/docs/snippets/providers/github-snippet-autogenerated.mdx new file mode 100644 index 0000000000..13ea2bcaec --- /dev/null +++ b/docs/snippets/providers/github-snippet-autogenerated.mdx @@ -0,0 +1,65 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **access_token**: GitHub Access Token (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query github + provider: github + config: "{{ provider.my_provider_name }}" + with: + repository: {value} + previous_stars_count: {value} + last_stargazer: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query github + provider: github + config: "{{ provider.my_provider_name }}" + with: + run_action: {value} # The action to run. + workflow: {value} # The workflow to run. + repo_name: {value} # The repository name. + repo_owner: {value} # The repository owner. + ref: {value} # The ref to use. + inputs: {value} # The inputs to use. +``` + + + + +Check the following workflow examples: +- [datadog-log-monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/datadog-log-monitor.yml) +- [db_disk_space_monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/db_disk_space_monitor.yml) +- [new_github_stars.yml](https://github.com/keephq/keep/blob/main/examples/workflows/new_github_stars.yml) +- [run-github-workflow.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/run-github-workflow.yaml) +- [service-error-rate-monitor-datadog.yml](https://github.com/keephq/keep/blob/main/examples/workflows/service-error-rate-monitor-datadog.yml) +- [update_workflows_from_http.yml](https://github.com/keephq/keep/blob/main/examples/workflows/update_workflows_from_http.yml) +- [zoom_chat_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/zoom_chat_example.yml) + + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). + +- **get_last_commits** Get the N last commits from a GitHub repository (view, scopes: no additional scopes) + + - `repository`: The GitHub repository to get the commits from. + - `n`: The number of commits to get. 
+- **get_last_releases** Get the N last releases and their changelog from a GitHub repository (view, scopes: no additional scopes) + + - `repository`: The GitHub repository to get the releases from. + - `n`: The number of releases to get. diff --git a/docs/snippets/providers/github_workflows-snippet-autogenerated.mdx b/docs/snippets/providers/github_workflows-snippet-autogenerated.mdx new file mode 100644 index 0000000000..de5fec536a --- /dev/null +++ b/docs/snippets/providers/github_workflows-snippet-autogenerated.mdx @@ -0,0 +1,39 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **personal_access_token**: Github Personal Access Token (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query github_workflows + provider: github_workflows + config: "{{ provider.my_provider_name }}" + with: + url: {value} + method: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query github_workflows + provider: github_workflows + config: "{{ provider.my_provider_name }}" + with: + github_url: {value} + github_method: {value} +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/gitlab-snippet-autogenerated.mdx b/docs/snippets/providers/gitlab-snippet-autogenerated.mdx new file mode 100644 index 0000000000..bb96cc068a --- /dev/null +++ b/docs/snippets/providers/gitlab-snippet-autogenerated.mdx @@ -0,0 +1,36 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **host**: GitLab Host (required: True, sensitive: False) +- **personal_access_token**: GitLab Personal Access Token (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **api**: Authenticated with api scope (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query gitlab + provider: gitlab + config: "{{ provider.my_provider_name }}" + with: + id: {value} + title: {value} + description: {value} + labels: {value} + issue_type: {value} +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/gitlabpipelines-snippet-autogenerated.mdx b/docs/snippets/providers/gitlabpipelines-snippet-autogenerated.mdx new file mode 100644 index 0000000000..c50796e3f4 --- /dev/null +++ b/docs/snippets/providers/gitlabpipelines-snippet-autogenerated.mdx @@ -0,0 +1,39 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **access_token**: GitLab Access Token (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query gitlabpipelines + provider: gitlabpipelines + config: "{{ provider.my_provider_name }}" + with: + url: {value} + method: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query gitlabpipelines + provider: gitlabpipelines + config: "{{ provider.my_provider_name }}" + with: + gitlab_url: {value} + gitlab_method: {value} +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/gke-snippet-autogenerated.mdx b/docs/snippets/providers/gke-snippet-autogenerated.mdx new file mode 100644 index 0000000000..926cae5d61 --- /dev/null +++ b/docs/snippets/providers/gke-snippet-autogenerated.mdx @@ -0,0 +1,62 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **service_account_json**: The service account JSON with container.viewer role (required: True, sensitive: True) +- **cluster_name**: The name of the cluster (required: True, sensitive: False) +- **region**: The GKE cluster region (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **roles/container.viewer**: Read access to GKE resources (mandatory) +- **pods:delete**: Required to delete/restart pods ([Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) +- **deployments:scale**: Required to scale deployments ([Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) +- **pods:list**: Required to list pods ([Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) +- **pods:get**: Required to get pod details ([Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) +- **pods:logs**: Required to get pod logs ([Documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query gke + provider: gke + config: "{{ provider.my_provider_name }}" + with: + command_type: {value} # Type of query to execute + # Additional arguments will be passed to the query method +``` + + + + + +Check the following workflow example: +- [gke.yml](https://github.com/keephq/keep/blob/main/examples/workflows/gke.yml) + + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). 
+ +- **get_pods** List all pods in a namespace or across all namespaces (view, scopes: pods:list, pods:get) + +- **get_pvc** List all PVCs in a namespace or across all namespaces (view, scopes: pods:list) + +- **get_node_pressure** Get pressure metrics for all nodes (view, scopes: pods:list) + +- **exec_command** Execute a command in a pod (action, scopes: pods:exec) + +- **restart_pod** Restart a pod by deleting it (action, scopes: pods:delete) + +- **get_deployment** Get deployment information (view, scopes: pods:list) + +- **scale_deployment** Scale a deployment to specified replicas (action, scopes: deployments:scale) + +- **get_pod_logs** Get logs from a pod (view, scopes: pods:logs) + diff --git a/docs/snippets/providers/google_chat-snippet-autogenerated.mdx b/docs/snippets/providers/google_chat-snippet-autogenerated.mdx new file mode 100644 index 0000000000..6d8d4ea06a --- /dev/null +++ b/docs/snippets/providers/google_chat-snippet-autogenerated.mdx @@ -0,0 +1,27 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **webhook_url**: Google Chat Webhook Url (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query google_chat + provider: google_chat + config: "{{ provider.my_provider_name }}" + with: + message: {value} # The text message to send. +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). 
diff --git a/docs/snippets/providers/grafana-snippet-autogenerated.mdx b/docs/snippets/providers/grafana-snippet-autogenerated.mdx new file mode 100644 index 0000000000..d74c069934 --- /dev/null +++ b/docs/snippets/providers/grafana-snippet-autogenerated.mdx @@ -0,0 +1,42 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **token**: Token (required: True, sensitive: True) +- **host**: Grafana host (required: True, sensitive: False) +- **datasource_uid**: Datasource UID (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **alert.rules:read**: Read Grafana alert rules in a folder and its subfolders. (mandatory) ([Documentation](https://grafana.com/docs/grafana/latest/administration/roles-and-permissions/access-control/custom-role-actions-scopes/)) +- **alert.provisioning:read**: Read all Grafana alert rules, notification policies, etc via provisioning API. ([Documentation](https://grafana.com/docs/grafana/latest/administration/roles-and-permissions/access-control/custom-role-actions-scopes/)) +- **alert.provisioning:write**: Update all Grafana alert rules, notification policies, etc via provisioning API. ([Documentation](https://grafana.com/docs/grafana/latest/administration/roles-and-permissions/access-control/custom-role-actions-scopes/)) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + + + +## Topology +This provider pulls [topology](/overview/servicetopology) to Keep. 
It could be used in [correlations](/overview/correlation-topology) +and [mapping](/overview/enrichment/mapping#mapping-with-topology-data), and as a context +for [alerts](/alerts/sidebar#7-alert-topology-view) and [incidents](/overview#17-incident-topology). +## Connecting via Webhook (omnidirectional) +This provider supports webhooks. + +If your Grafana is unreachable from Keep, you can use the following webhook url to configure Grafana to send alerts to Keep: + + 1. In Grafana, go to the Alerting tab in the Grafana dashboard. + 2. Click on Contact points in the left sidebar and create a new one. + 3. Give it a name and select Webhook as kind of contact point with webhook url as KEEP_BACKEND_URL/alerts/event/grafana. + 4. Add 'X-API-KEY' as the request header {api_key}. + 5. Save the webhook. + 6. Click on Notification policies in the left sidebar + 7. Click on "New child policy" under the "Default policy" + 8. Remove all matchers until you see the following: "If no matchers are specified, this notification policy will handle all alert instances." + 9. Choose the webhook contact point you have just created under Contact point and click "Save Policy" + diff --git a/docs/snippets/providers/grafana_incident-snippet-autogenerated.mdx b/docs/snippets/providers/grafana_incident-snippet-autogenerated.mdx new file mode 100644 index 0000000000..ce6975d9e1 --- /dev/null +++ b/docs/snippets/providers/grafana_incident-snippet-autogenerated.mdx @@ -0,0 +1,36 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host_url**: Grafana Host URL (required: True, sensitive: False) +- **service_account_token**: Service Account Token (required: True, sensitive: True) +- +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is Authenticated + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query grafana_incident + provider: grafana_incident + config: "{{ provider.my_provider_name }}" + with: + operationType: {value} + updateType: {value} +``` + + + + +Check the following workflow examples: +- [create-new-incident-grafana-incident.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/create-new-incident-grafana-incident.yaml) +- [update-incident-grafana-incident.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/update-incident-grafana-incident.yaml) diff --git a/docs/snippets/providers/grafana_loki-snippet-autogenerated.mdx b/docs/snippets/providers/grafana_loki-snippet-autogenerated.mdx new file mode 100644 index 0000000000..24a2860773 --- /dev/null +++ b/docs/snippets/providers/grafana_loki-snippet-autogenerated.mdx @@ -0,0 +1,47 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host_url**: Grafana Loki Host URL (required: True, sensitive: False) +- **verify**: Enable SSL verification (required: False, sensitive: False) +- **authentication_type**: Authentication Type (required: True, sensitive: False) +- **username**: HTTP basic authentication - Username (required: False, sensitive: False) +- **password**: HTTP basic authentication - Password (required: False, sensitive: True) +- **x_scope_orgid**: X-Scope-OrgID Header Authentication (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **authenticated**: Instance is valid and user is authenticated + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query grafana_loki + provider: grafana_loki + config: "{{ provider.my_provider_name }}" + with: + query: {value} + limit: {value} + time: {value} + direction: {value} + start: {value} + end: {value} + since: {value} + step: {value} + interval: {value} + queryType: {value} +``` + + + + + +Check the following workflow example: +- [query_grafana_loki.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/query_grafana_loki.yaml) diff --git a/docs/snippets/providers/grafana_oncall-snippet-autogenerated.mdx b/docs/snippets/providers/grafana_oncall-snippet-autogenerated.mdx new file mode 100644 index 0000000000..978b0e17b1 --- /dev/null +++ b/docs/snippets/providers/grafana_oncall-snippet-autogenerated.mdx @@ -0,0 +1,33 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **token**: Token (required: True, sensitive: False) +- **host**: Grafana OnCall Host (required: True, sensitive: False) + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query grafana_oncall + provider: grafana_oncall + config: "{{ provider.my_provider_name }}" + with: + title: {value} + alert_uid: {value} + message: {value} + image_url: {value} + state: {value} + link_to_upstream_details: {value} +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). 
diff --git a/docs/snippets/providers/graylog-snippet-autogenerated.mdx b/docs/snippets/providers/graylog-snippet-autogenerated.mdx new file mode 100644 index 0000000000..e0bd841dfa --- /dev/null +++ b/docs/snippets/providers/graylog-snippet-autogenerated.mdx @@ -0,0 +1,74 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **graylog_user_name**: Username (required: True, sensitive: False) +- **graylog_access_token**: Graylog Access Token (required: True, sensitive: True) +- **deployment_url**: Deployment Url (required: True, sensitive: False) +- **verify**: Verify SSL certificates (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: Mandatory for all operations, ensures the user is authenticated. (mandatory) +- **authorized**: Mandatory for querying incidents and managing resources, ensures the user has `Admin` privileges. (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query graylog + provider: graylog + config: "{{ provider.my_provider_name }}" + with: + events_search_parameters: {value} +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). + + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). + +- **search** Search using elastic query language in Graylog (action, scopes: authorized) + + - `query`: The query string to search for. + - `query_type`: The type of query to use. Default is "elastic". 
+ - `timerange_seconds`: The time range in seconds. Default is 300 seconds. + - `timerange_type`: The type of time range. Default is "relative". + - `page`: Page number, starting from 0. + - `per_page`: Number of results per page. + +## Connecting via Webhook (omnidirectional) +This provider supports webhooks. + + +To send alerts from Graylog to Keep, use the following webhook URL to configure Graylog to send alerts to Keep: + +1. In Graylog, from the Topbar, go to `Alerts` > `Notifications`. +2. Click "Create Notification". +3. In the New Notification form, configure: + +**Note**: For Graylog v4.x please set the **URL** to `KEEP_BACKEND_URL/alerts/event/graylog?api_key={api_key}`. + +- **Display Name**: keep-graylog-webhook-integration +- **Title**: keep-graylog-webhook-integration +- **Notification Type**: Custom HTTP Notification +- **URL**: KEEP_BACKEND_URL/alerts/event/graylog # Whitelist this URL +- **Headers**: X-API-KEY:{api_key} +4. Erase the Body Template. +5. Click on "Create Notification". +6. Go to the `Event Definitions` tab, and select the Event Definition that will trigger the alert you want to send to Keep and click on More > Edit. +7. Go to "Notifications" tab. +8. Click on "Add Notification" and select the "keep-graylog-webhook-integration" that you created in step 3. +9. Click on "Add Notification". +10. Click `Next` > `Update` event definition + diff --git a/docs/snippets/providers/grok-snippet-autogenerated.mdx b/docs/snippets/providers/grok-snippet-autogenerated.mdx new file mode 100644 index 0000000000..0913222317 --- /dev/null +++ b/docs/snippets/providers/grok-snippet-autogenerated.mdx @@ -0,0 +1,30 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: X.AI Grok API Key (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query grok + provider: grok + config: "{{ provider.my_provider_name }}" + with: + prompt: {value} + model: {value} + max_tokens: {value} + structured_output_format: {value} +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/http-snippet-autogenerated.mdx b/docs/snippets/providers/http-snippet-autogenerated.mdx new file mode 100644 index 0000000000..1dffe5a06b --- /dev/null +++ b/docs/snippets/providers/http-snippet-autogenerated.mdx @@ -0,0 +1,62 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query http + provider: http + config: "{{ provider.my_provider_name }}" + with: + url: {value} + method: {value} + headers: {value} + body: {value} + params: {value} + proxies: {value} + fail_on_error: {value} + verify: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query http + provider: http + config: "{{ provider.my_provider_name }}" + with: + url: {value} + method: {value} + headers: {value} + body: {value} + params: {value} + proxies: {value} + verify: {value} +``` + + + + +Check the following workflow examples: +- [create-new-incident-grafana-incident.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/create-new-incident-grafana-incident.yaml) +- [db_disk_space_monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/db_disk_space_monitor.yml) +- [http_enrich.yml](https://github.com/keephq/keep/blob/main/examples/workflows/http_enrich.yml) +- [ifelse.yml](https://github.com/keephq/keep/blob/main/examples/workflows/ifelse.yml) +- 
[incident-enrich.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/incident-enrich.yaml) +- [pagerduty.yml](https://github.com/keephq/keep/blob/main/examples/workflows/pagerduty.yml) +- [permissions_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/permissions_example.yml) +- [send-message-telegram-with-htmlmd.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/send-message-telegram-with-htmlmd.yaml) +- [simple_http_request_ntfy.yml](https://github.com/keephq/keep/blob/main/examples/workflows/simple_http_request_ntfy.yml) +- [slack-workflow-trigger.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack-workflow-trigger.yml) +- [telegram_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/telegram_basic.yml) +- [update-incident-grafana-incident.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/update-incident-grafana-incident.yaml) +- [update_workflows_from_http.yml](https://github.com/keephq/keep/blob/main/examples/workflows/update_workflows_from_http.yml) +- [webhook_example_foreach.yml](https://github.com/keephq/keep/blob/main/examples/workflows/webhook_example_foreach.yml) +- [zoom_chat_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/zoom_chat_example.yml) diff --git a/docs/snippets/providers/icinga2-snippet-autogenerated.mdx b/docs/snippets/providers/icinga2-snippet-autogenerated.mdx new file mode 100644 index 0000000000..0bfaa0b77a --- /dev/null +++ b/docs/snippets/providers/icinga2-snippet-autogenerated.mdx @@ -0,0 +1,17 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **host_url**: Icinga2 Host URL (required: True, sensitive: False) +- **api_user**: Icinga2 API User (required: True, sensitive: False) +- **api_password**: Icinga2 API Password (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **read_alerts**: Read alerts from Icinga2 + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/ilert-snippet-autogenerated.mdx b/docs/snippets/providers/ilert-snippet-autogenerated.mdx new file mode 100644 index 0000000000..55ca22a47d --- /dev/null +++ b/docs/snippets/providers/ilert-snippet-autogenerated.mdx @@ -0,0 +1,57 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **ilert_token**: ILert API token (required: True, sensitive: True) +- **ilert_host**: ILert API host (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **read_permission**: Read permission (mandatory) +- **write_permission**: Write permission + + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query ilert + provider: ilert + config: "{{ provider.my_provider_name }}" + with: + incident_id: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query ilert + provider: ilert + config: "{{ provider.my_provider_name }}" + with: + _type: {value} # Type of notification ('incident' or 'event') - determines which endpoint is used + summary: {value} # A brief summary of the incident (required for new incidents) + status: {value} # Current status of the incident (INVESTIGATING, RESOLVED, MONITORING, IDENTIFIED) + message: {value} # Detailed message describing the incident (default: empty string) + affectedServices: {value} # JSON string of affected services and their statuses (default: "[]") + id: {value} # ID of incident to update (use "0" to create a new incident) + event_type: {value} # Type of event to post (ALERT, ACCEPT, RESOLVE) + details: {value} # Detailed information about the event + alert_key: {value} # Unique key for event deduplication + priority: {value} # Priority level of the event (HIGH, LOW) + images: {value} # List of image URLs to include with the event + links: {value} # List of related links to include with the event + custom_details: {value} # Custom key-value pairs for additional context +``` + + + + +Check the following workflow example: +- [ilert-incident-upon-alert.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/ilert-incident-upon-alert.yaml) diff --git a/docs/snippets/providers/incidentio-snippet-autogenerated.mdx b/docs/snippets/providers/incidentio-snippet-autogenerated.mdx new file mode 100644 index 0000000000..f416e3eb19 --- /dev/null +++ b/docs/snippets/providers/incidentio-snippet-autogenerated.mdx @@ -0,0 +1,32 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider 
requires authentication. +- **incidentIoApiKey**: IncidentIO's API_KEY (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is Authenticated (mandatory) +- **read_access**: User has read access (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query incidentio + provider: incidentio + config: "{{ provider.my_provider_name }}" + with: + incident_id: {value} +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/incidentmanager-snippet-autogenerated.mdx b/docs/snippets/providers/incidentmanager-snippet-autogenerated.mdx new file mode 100644 index 0000000000..b0df57533d --- /dev/null +++ b/docs/snippets/providers/incidentmanager-snippet-autogenerated.mdx @@ -0,0 +1,39 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **region**: AWS region (required: True, sensitive: False) +- **response_plan_arn**: AWS Response Plan's arn (required: True, sensitive: False) +- **sns_topic_arn**: AWS SNS Topic arn you want to be used/using in response plan (required: True, sensitive: False) +- **access_key**: AWS access key (Leave empty if using IAM role at EC2) (required: False, sensitive: True) +- **access_key_secret**: AWS access key secret (Leave empty if using IAM role at EC2) (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **ssm-incidents:ListIncidentRecords**: Required to retrieve incidents. 
(mandatory) ([Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html)) +- **ssm-incidents:GetResponsePlan**: Required to get response plan and register keep as webhook ([Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html)) +- **ssm-incidents:UpdateResponsePlan**: Required to update response plan and register keep as webhook ([Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html)) +- **iam:SimulatePrincipalPolicy**: Allow Keep to test the scopes of the current user/role without modifying any resource. ([Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html)) +- **sns:ListSubscriptionsByTopic**: Required to list all subscriptions of a topic, so Keep will be able to add itself as a subscription. ([Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html)) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query incidentmanager + provider: incidentmanager + config: "{{ provider.my_provider_name }}" + + +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/jira-snippet-autogenerated.mdx b/docs/snippets/providers/jira-snippet-autogenerated.mdx new file mode 100644 index 0000000000..2375039b55 --- /dev/null +++ b/docs/snippets/providers/jira-snippet-autogenerated.mdx @@ -0,0 +1,69 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **email**: Atlassian Jira Email (required: True, sensitive: False) +- **api_token**: Atlassian Jira API Token (required: True, sensitive: True) +- **host**: Atlassian Jira Host (required: True, sensitive: False) +- **ticket_creation_url**: URL for creating new tickets (optional, will use default if not provided) (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **BROWSE_PROJECTS**: Browse Jira Projects (mandatory) +- **CREATE_ISSUES**: Create Jira Issues (mandatory) +- **CLOSE_ISSUES**: Close Jira Issues +- **EDIT_ISSUES**: Edit Jira Issues +- **DELETE_ISSUES**: Delete Jira Issues +- **MODIFY_REPORTER**: Modify Jira Issue Reporter +- **TRANSITION_ISSUES**: Transition Jira Issues + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query jira + provider: jira + config: "{{ provider.my_provider_name }}" + with: + ticket_id: {value} # The ticket id of the issue, optional. + board_id: {value} # The board id of the issue. +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query jira + provider: jira + config: "{{ provider.my_provider_name }}" + with: + summary: {value} # The summary of the issue. + description: {value} # The description of the issue. + issue_type: {value} # The type of the issue. + project_key: {value} # The project key of the issue. + board_name: {value} # The board name of the issue. + issue_id: {value} # The issue id of the issue. + labels: {value} # The labels of the issue. + components: {value} # The components of the issue. + custom_fields: {value} # The custom fields of the issue. + transition_to: {value} # Optional transition name (e.g., "Done", "Resolved") to apply after update/create. 
+``` + + + + +Check the following workflow examples: +- [create_jira_ticket_upon_alerts.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_jira_ticket_upon_alerts.yml) +- [incident-enrich.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/incident-enrich.yaml) +- [jira-create-ticket-on-alert.yml](https://github.com/keephq/keep/blob/main/examples/workflows/jira-create-ticket-on-alert.yml) +- [jira-transition-on-resolved.yml](https://github.com/keephq/keep/blob/main/examples/workflows/jira-transition-on-resolved.yml) +- [jira_on_prem.yml](https://github.com/keephq/keep/blob/main/examples/workflows/jira_on_prem.yml) +- [test_jira_create_with_custom_fields.yml](https://github.com/keephq/keep/blob/main/examples/workflows/test_jira_create_with_custom_fields.yml) +- [test_jira_custom_fields_fix.yml](https://github.com/keephq/keep/blob/main/examples/workflows/test_jira_custom_fields_fix.yml) +- [update_jira_ticket.yml](https://github.com/keephq/keep/blob/main/examples/workflows/update_jira_ticket.yml) diff --git a/docs/snippets/providers/jiraonprem-snippet-autogenerated.mdx b/docs/snippets/providers/jiraonprem-snippet-autogenerated.mdx new file mode 100644 index 0000000000..29b1b6166d --- /dev/null +++ b/docs/snippets/providers/jiraonprem-snippet-autogenerated.mdx @@ -0,0 +1,60 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host**: Jira Host (required: True, sensitive: False) +- **personal_access_token**: Jira PAT (required: True, sensitive: True) +- **ticket_creation_url**: URL for creating new tickets (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **BROWSE_PROJECTS**: Browse Jira Projects (mandatory) +- **CREATE_ISSUES**: Create Jira Issues (mandatory) +- **CLOSE_ISSUES**: Close Jira Issues +- **EDIT_ISSUES**: Edit Jira Issues +- **DELETE_ISSUES**: Delete Jira Issues +- **MODIFY_REPORTER**: Modify Jira Issue Reporter + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query jiraonprem + provider: jiraonprem + config: "{{ provider.my_provider_name }}" + with: + ticket_id: {value} # The ticket id. + board_id: {value} # The board id. +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query jiraonprem + provider: jiraonprem + config: "{{ provider.my_provider_name }}" + with: + summary: {value} + description: {value} + issue_type: {value} + project_key: {value} + board_name: {value} + issue_id: {value} + labels: {value} + components: {value} + custom_fields: {value} + priority: {value} +``` + + + + +Check the following workflow example: +- [jira_on_prem.yml](https://github.com/keephq/keep/blob/main/examples/workflows/jira_on_prem.yml) diff --git a/docs/snippets/providers/kafka-snippet-autogenerated.mdx b/docs/snippets/providers/kafka-snippet-autogenerated.mdx new file mode 100644 index 0000000000..ebef66ee49 --- /dev/null +++ b/docs/snippets/providers/kafka-snippet-autogenerated.mdx @@ -0,0 +1,20 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **host**: Kafka host (required: True, sensitive: False) +- **topic**: The topic to subscribe to (required: True, sensitive: False) +- **username**: Username (required: False, sensitive: True) +- **password**: Password (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **topic_read**: The kafka user that have permissions to read the topic. (mandatory) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/keep-snippet-autogenerated.mdx b/docs/snippets/providers/keep-snippet-autogenerated.mdx new file mode 100644 index 0000000000..2514b75766 --- /dev/null +++ b/docs/snippets/providers/keep-snippet-autogenerated.mdx @@ -0,0 +1,61 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query keep + provider: keep + config: "{{ provider.my_provider_name }}" + with: + filters: {value} # filters to query Keep (only for version 1) + version: {value} # version of Keep API + distinct: {value} # if True, return only distinct alerts + time_delta: {value} # time delta in days to query Keep + timerange: {value} # timerange dict to calculate time delta + filter: {value} # filter to query Keep (only for version 2) + limit: {value} # limit number of results (only for version 2) +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query keep + provider: keep + config: "{{ provider.my_provider_name }}" + with: + delete_all_other_workflows: {value} # if True, delete all other workflows + workflow_full_sync: {value} # if True, sync all workflows + workflow_to_update_yaml: {value} # workflow yaml to update + alert: {value} # alert data to create + fingerprint_fields: {value} # fields to use for alert fingerprinting + override_source_with: {value} # override alert source + read_only: {value} # if True, don't modify existing alerts + fingerprint: {value} # alert fingerprint + if: {value} # condition to evaluate for alert creation + for: {value} # duration for state alerts +``` + + + + +Check the following workflow examples: +- [create_alert_from_vm_metric.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_alert_from_vm_metric.yml) +- [create_alert_in_keep.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_alert_in_keep.yml) +- [create_alerts_from_elastic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_alerts_from_elastic.yml) +- [create_alerts_from_mysql.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_alerts_from_mysql.yml) +- [create_multi_alert_from_vm_metric.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_multi_alert_from_vm_metric.yml) +- 
[fluxcd_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/fluxcd_example.yml) +- [resolve_old_alerts.yml](https://github.com/keephq/keep/blob/main/examples/workflows/resolve_old_alerts.yml) +- [retrieve_cloudwatch_logs.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/retrieve_cloudwatch_logs.yaml) +- [update_service_now_tickets_status.yml](https://github.com/keephq/keep/blob/main/examples/workflows/update_service_now_tickets_status.yml) +- [update_workflows_from_http.yml](https://github.com/keephq/keep/blob/main/examples/workflows/update_workflows_from_http.yml) +- [update_workflows_from_s3.yml](https://github.com/keephq/keep/blob/main/examples/workflows/update_workflows_from_s3.yml) +- [webhook_example_foreach.yml](https://github.com/keephq/keep/blob/main/examples/workflows/webhook_example_foreach.yml) diff --git a/docs/snippets/providers/kibana-snippet-autogenerated.mdx b/docs/snippets/providers/kibana-snippet-autogenerated.mdx new file mode 100644 index 0000000000..697eb85f3c --- /dev/null +++ b/docs/snippets/providers/kibana-snippet-autogenerated.mdx @@ -0,0 +1,22 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Kibana API Key (required: True, sensitive: True) +- **kibana_host**: Kibana Host (required: True, sensitive: False) +- **kibana_port**: Kibana Port (defaults to 9243) (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **rulesSettings:read**: Read alerts (mandatory) +- **rulesSettings:write**: Modify alerts (mandatory) +- **actions:read**: Read connectors (mandatory) +- **actions:write**: Write connectors (mandatory) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/kubernetes-snippet-autogenerated.mdx b/docs/snippets/providers/kubernetes-snippet-autogenerated.mdx new file mode 100644 index 0000000000..1600d818ca --- /dev/null +++ b/docs/snippets/providers/kubernetes-snippet-autogenerated.mdx @@ -0,0 +1,70 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_server**: The kubernetes api server url (required: False, sensitive: False) +- **token**: Bearer token to access kubernetes (leave empty for in-cluster auth) (required: False, sensitive: True) +- **insecure**: Skip TLS verification (required: False, sensitive: False) +- **use_in_cluster_config**: Use in-cluster configuration (ServiceAccount) (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **connect_to_kubernetes**: Check if the provided token can connect to the kubernetes server (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query kubernetes + provider: kubernetes + config: "{{ provider.my_provider_name }}" + with: + command_type: {value} # The type of query to perform. 
Supported queries are: +- get_logs: Get logs from a pod +- get_deployment_logs: Get logs from all pods in a deployment +- get_events: Get events for a namespace or pod +- get_nodes: List nodes +- get_pods: List pods +- get_node_pressure: Get node pressure conditions +- get_pvc: List persistent volume claims +- get_deployments: List deployments +- get_statefulsets: List statefulsets +- get_daemonsets: List daemonsets +- get_services: List services +- get_namespaces: List namespaces +- get_ingresses: List ingresses for a namespace or all namespaces +- get_jobs: List jobs + # Additional arguments for the query. +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query kubernetes + provider: kubernetes + config: "{{ provider.my_provider_name }}" + with: + action: {value} # The action to perform. Supported actions are: +- rollout_restart: Restart a deployment/statefulset/daemonset +- restart_pod: Restart a specific pod +- cordon_node: Mark node as unschedulable +- uncordon_node: Mark node as schedulable +- drain_node: Safely evict pods from node +- scale_deployment: Scale deployment up/down +- scale_statefulset: Scale statefulset up/down +- exec_pod_command: Execute command in pod + # Additional arguments for the action. +``` + + + + +Check the following workflow example: +- [gke.yml](https://github.com/keephq/keep/blob/main/examples/workflows/gke.yml) diff --git a/docs/snippets/providers/libre_nms-snippet-autogenerated.mdx b/docs/snippets/providers/libre_nms-snippet-autogenerated.mdx new file mode 100644 index 0000000000..3da673b053 --- /dev/null +++ b/docs/snippets/providers/libre_nms-snippet-autogenerated.mdx @@ -0,0 +1,18 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **host_url**: LibreNMS Host URL (required: True, sensitive: False) +- **api_key**: LibreNMS API Key (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **read_alerts**: Read alerts from LibreNMS + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/linear-snippet-autogenerated.mdx b/docs/snippets/providers/linear-snippet-autogenerated.mdx new file mode 100644 index 0000000000..b44a5bee1c --- /dev/null +++ b/docs/snippets/providers/linear-snippet-autogenerated.mdx @@ -0,0 +1,42 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_token**: Linear API Token (required: True, sensitive: True) +- **ticket_creation_url**: URL for creating new tickets (required: False, sensitive: False) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query linear + provider: linear + config: "{{ provider.my_provider_name }}" + with: + team_name: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query linear + provider: linear + config: "{{ provider.my_provider_name }}" + with: + team_name: {value} + project_name: {value} + title: {value} + description: {value} + priority: {value} +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). 
diff --git a/docs/snippets/providers/linearb-snippet-autogenerated.mdx b/docs/snippets/providers/linearb-snippet-autogenerated.mdx new file mode 100644 index 0000000000..71764f4336 --- /dev/null +++ b/docs/snippets/providers/linearb-snippet-autogenerated.mdx @@ -0,0 +1,41 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_token**: LinearB API Token (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **any**: A way to validate the provider (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query linearb + provider: linearb + config: "{{ provider.my_provider_name }}" + with: + incident_id: {value} + http_url: {value} + title: {value} + teams: {value} + repository_urls: {value} + services: {value} + started_at: {value} + ended_at: {value} + git_ref: {value} + should_delete: {value} + issued_at: {value} +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/litellm-snippet-autogenerated.mdx b/docs/snippets/providers/litellm-snippet-autogenerated.mdx new file mode 100644 index 0000000000..7cfcb9c122 --- /dev/null +++ b/docs/snippets/providers/litellm-snippet-autogenerated.mdx @@ -0,0 +1,34 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **api_url**: LiteLLM API endpoint URL (required: True, sensitive: False) +- **api_key**: Optional API key if your LiteLLM deployment requires authentication (required: False, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query litellm + provider: litellm + config: "{{ provider.my_provider_name }}" + with: + prompt: {value} + temperature: {value} + model: {value} + max_tokens: {value} + structured_output_format: {value} +``` + + + + + +Check the following workflow example: +- [enrich_using_structured_output_from_openai.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/enrich_using_structured_output_from_openai.yaml) diff --git a/docs/snippets/providers/llamacpp-snippet-autogenerated.mdx b/docs/snippets/providers/llamacpp-snippet-autogenerated.mdx new file mode 100644 index 0000000000..a918856860 --- /dev/null +++ b/docs/snippets/providers/llamacpp-snippet-autogenerated.mdx @@ -0,0 +1,28 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host**: Llama.cpp Server Host URL (required: True, sensitive: False) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query llamacpp + provider: llamacpp + config: "{{ provider.my_provider_name }}" + with: + prompt: {value} + max_tokens: {value} +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). 
diff --git a/docs/snippets/providers/mailgun-snippet-autogenerated.mdx b/docs/snippets/providers/mailgun-snippet-autogenerated.mdx new file mode 100644 index 0000000000..521dc778dd --- /dev/null +++ b/docs/snippets/providers/mailgun-snippet-autogenerated.mdx @@ -0,0 +1,16 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **email**: Email address to send alerts to (required: False, sensitive: False) +- **sender**: Sender email address to validate (required: False, sensitive: False) +- **email_domain**: Custom email domain for receiving alerts (required: False, sensitive: False) +- **extraction**: Extraction Rules (required: False, sensitive: False) + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/mattermost-snippet-autogenerated.mdx b/docs/snippets/providers/mattermost-snippet-autogenerated.mdx new file mode 100644 index 0000000000..901cf57e6e --- /dev/null +++ b/docs/snippets/providers/mattermost-snippet-autogenerated.mdx @@ -0,0 +1,29 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **webhook_url**: Mattermost Webhook Url (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query mattermost + provider: mattermost + config: "{{ provider.my_provider_name }}" + with: + message: {value} # The content of the message. + attachments: {value} # The attachments of the message. 
+ channel: {value} # The channel to send the message to +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/mock-snippet-autogenerated.mdx b/docs/snippets/providers/mock-snippet-autogenerated.mdx new file mode 100644 index 0000000000..e89027406c --- /dev/null +++ b/docs/snippets/providers/mock-snippet-autogenerated.mdx @@ -0,0 +1,43 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query mock + provider: mock + config: "{{ provider.my_provider_name }}" + with: + # Just will return all parameters passed to it. +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query mock + provider: mock + config: "{{ provider.my_provider_name }}" + with: + # Just will return all parameters passed to it. 
+``` + + + + +Check the following workflow examples: +- [autosupress.yml](https://github.com/keephq/keep/blob/main/examples/workflows/autosupress.yml) +- [businesshours.yml](https://github.com/keephq/keep/blob/main/examples/workflows/businesshours.yml) +- [datadog-log-monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/datadog-log-monitor.yml) +- [db_disk_space_monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/db_disk_space_monitor.yml) +- [enrich_using_structured_output_from_deepseek.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/enrich_using_structured_output_from_deepseek.yaml) +- [enrich_using_structured_output_from_openai.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/enrich_using_structured_output_from_openai.yaml) +- [enrich_using_structured_output_from_vllm_qwen.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/enrich_using_structured_output_from_vllm_qwen.yaml) +- [ilert-incident-upon-alert.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/ilert-incident-upon-alert.yaml) +- [resolve_old_alerts.yml](https://github.com/keephq/keep/blob/main/examples/workflows/resolve_old_alerts.yml) diff --git a/docs/snippets/providers/monday-snippet-autogenerated.mdx b/docs/snippets/providers/monday-snippet-autogenerated.mdx new file mode 100644 index 0000000000..9a2bda366a --- /dev/null +++ b/docs/snippets/providers/monday-snippet-autogenerated.mdx @@ -0,0 +1,38 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **api_token**: Personal API Token (required: False, sensitive: True) +- **access_token**: For access token installation flow, use Keep UI (required: False, sensitive: True) +- **scopes**: Scopes from OAuth logic, comma separated (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **create_pulse**: Create a new pulse + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query monday + provider: monday + config: "{{ provider.my_provider_name }}" + with: + board_id: {value} + group_id: {value} + item_name: {value} + column_values: {value} +``` + + + + +Check the following workflow example: +- [monday_create_pulse.yml](https://github.com/keephq/keep/blob/main/examples/workflows/monday_create_pulse.yml) diff --git a/docs/snippets/providers/mongodb-snippet-autogenerated.mdx b/docs/snippets/providers/mongodb-snippet-autogenerated.mdx new file mode 100644 index 0000000000..856ad24674 --- /dev/null +++ b/docs/snippets/providers/mongodb-snippet-autogenerated.mdx @@ -0,0 +1,40 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **host**: Mongo host_uri (required: True, sensitive: False) +- **username**: MongoDB username (required: False, sensitive: False) +- **password**: MongoDB password (required: False, sensitive: True) +- **database**: MongoDB database name (required: False, sensitive: False) +- **auth_source**: Mongo authSource database name (required: False, sensitive: False) +- **additional_options**: Mongo kwargs, these will be passed to MongoClient (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **connect_to_server**: The user can connect to the server (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query mongodb + provider: mongodb + config: "{{ provider.my_provider_name }}" + with: + query: {value} + as_dict: {value} + single_row: {value} +``` + + + + + +Check the following workflow example: +- [query_mongodb.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/query_mongodb.yaml) diff --git a/docs/snippets/providers/mysql-snippet-autogenerated.mdx b/docs/snippets/providers/mysql-snippet-autogenerated.mdx new file mode 100644 index 0000000000..507a90c715 --- /dev/null +++ b/docs/snippets/providers/mysql-snippet-autogenerated.mdx @@ -0,0 +1,58 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **username**: MySQL username (required: True, sensitive: False) +- **password**: MySQL password (required: True, sensitive: True) +- **host**: MySQL hostname (required: True, sensitive: False) +- **database**: MySQL database name (required: False, sensitive: False) +- **port**: MySQL port (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **connect_to_server**: The user can connect to the server (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query mysql + provider: mysql + config: "{{ provider.my_provider_name }}" + with: + query: {value} # Query to execute + as_dict: {value} # If True, returns the results as a list of dictionaries + single_row: {value} # If True, returns only the first row of the results + # Arguments will be passed to the query.format(**kwargs) +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query mysql + provider: mysql + config: "{{ provider.my_provider_name }}" + with: + query: {value} # Query to execute + as_dict: {value} # If True, returns the results as a list of dictionaries + single_row: {value} # If True, returns only the first row of the results + # Arguments will be passed to the query.format(**kwargs) +``` + + + + +Check the following workflow examples: +- [blogpost.yml](https://github.com/keephq/keep/blob/main/examples/workflows/blogpost.yml) +- [conditionally_run_if_ai_says_so.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/conditionally_run_if_ai_says_so.yaml) +- [create_alerts_from_mysql.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_alerts_from_mysql.yml) +- [raw_sql_query_datetime.yml](https://github.com/keephq/keep/blob/main/examples/workflows/raw_sql_query_datetime.yml) +- 
[simple_http_request_ntfy.yml](https://github.com/keephq/keep/blob/main/examples/workflows/simple_http_request_ntfy.yml) +- [slack-message-reaction.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack-message-reaction.yml) diff --git a/docs/snippets/providers/netbox-snippet-autogenerated.mdx b/docs/snippets/providers/netbox-snippet-autogenerated.mdx new file mode 100644 index 0000000000..8e1275f2d6 --- /dev/null +++ b/docs/snippets/providers/netbox-snippet-autogenerated.mdx @@ -0,0 +1,9 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/netdata-snippet-autogenerated.mdx b/docs/snippets/providers/netdata-snippet-autogenerated.mdx new file mode 100644 index 0000000000..27b78578bb --- /dev/null +++ b/docs/snippets/providers/netdata-snippet-autogenerated.mdx @@ -0,0 +1,29 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + + +## Connecting via Webhook (omnidirectional) +This provider supports webhooks. + + +To send alerts from Netdata to Keep, use the following webhook URL to configure Netdata to send alerts to Keep: + +1. In Netdata, go to Space settings. +2. Go to "Alerts & Notifications". +3. Click on "Add configuration". +4. Add "Webhook" as the notification method. +5. Add a name to the configuration. +6. Select Room(s) to apply the configuration. +7. 
Select Notification(s) to apply the configuration. +8. In the "Webhook URL" field, add KEEP_BACKEND_URL/alerts/event/netdata. +9. Add a request header with the key "x-api-key" and the value as {api_key}. +10. Leave the Authentication as "No Authentication". +11. Add the "Challenge secret" as "keep-netdata-webhook-integration". +12. Save the configuration. + diff --git a/docs/snippets/providers/netxms-snippet-autogenerated.mdx b/docs/snippets/providers/netxms-snippet-autogenerated.mdx new file mode 100644 index 0000000000..dbb8604d5d --- /dev/null +++ b/docs/snippets/providers/netxms-snippet-autogenerated.mdx @@ -0,0 +1,13 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: NetXMS API key (required: True, sensitive: True) + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/newrelic-snippet-autogenerated.mdx b/docs/snippets/providers/newrelic-snippet-autogenerated.mdx new file mode 100644 index 0000000000..2ded3e487d --- /dev/null +++ b/docs/snippets/providers/newrelic-snippet-autogenerated.mdx @@ -0,0 +1,40 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: New Relic User key. To receive webhooks, use `User key` of an admin account (required: True, sensitive: True) +- **account_id**: New Relic account ID (required: True, sensitive: False) +- **new_relic_api_url**: New Relic API URL (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **ai.issues:read**: Required to read issues and related information (mandatory) ([Documentation](https://docs.newrelic.com/docs/accounts/accounts-billing/new-relic-one-user-management/user-management-concepts/)) +- **ai.destinations:read**: Required to read whether keep webhooks are registered ([Documentation](https://docs.newrelic.com/docs/accounts/accounts-billing/new-relic-one-user-management/user-management-concepts/)) +- **ai.destinations:write**: Required to register keep webhooks ([Documentation](https://docs.newrelic.com/docs/accounts/accounts-billing/new-relic-one-user-management/user-management-concepts/)) +- **ai.channels:read**: Required to know information about notification channels. ([Documentation](https://docs.newrelic.com/docs/accounts/accounts-billing/new-relic-one-user-management/user-management-concepts/)) +- **ai.channels:write**: Required to create notification channel ([Documentation](https://docs.newrelic.com/docs/accounts/accounts-billing/new-relic-one-user-management/user-management-concepts/)) + + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query newrelic + provider: newrelic + config: "{{ provider.my_provider_name }}" + with: + nrql: {value} + query: {value} # query to execute +``` + + + + + +Check the following workflow example: +- [complex-conditions-cel.yml](https://github.com/keephq/keep/blob/main/examples/workflows/complex-conditions-cel.yml) diff --git a/docs/snippets/providers/ntfy-snippet-autogenerated.mdx b/docs/snippets/providers/ntfy-snippet-autogenerated.mdx new file mode 100644 index 0000000000..3745ef2adf --- /dev/null +++ b/docs/snippets/providers/ntfy-snippet-autogenerated.mdx @@ -0,0 +1,40 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **access_token**: Ntfy Access Token (required: False, sensitive: True) +- **host**: Ntfy Host URL (For self-hosted Ntfy only) (required: False, sensitive: False) +- **username**: Ntfy Username (For self-hosted Ntfy only) (required: False, sensitive: False) +- **password**: Ntfy Password (For self-hosted Ntfy only) (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **send_alert**: (mandatory) + + + +## In workflows + +This provider can be used in workflows. 
+ + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query ntfy + provider: ntfy + config: "{{ provider.my_provider_name }}" + with: + message: {value} + topic: {value} +``` + + + + +Check the following workflow examples: +- [ntfy_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/ntfy_basic.yml) +- [query_clickhouse.yml](https://github.com/keephq/keep/blob/main/examples/workflows/query_clickhouse.yml) +- [query_victoriametrics.yml](https://github.com/keephq/keep/blob/main/examples/workflows/query_victoriametrics.yml) +- [simple_http_request_ntfy.yml](https://github.com/keephq/keep/blob/main/examples/workflows/simple_http_request_ntfy.yml) diff --git a/docs/snippets/providers/ollama-snippet-autogenerated.mdx b/docs/snippets/providers/ollama-snippet-autogenerated.mdx new file mode 100644 index 0000000000..f266d0d318 --- /dev/null +++ b/docs/snippets/providers/ollama-snippet-autogenerated.mdx @@ -0,0 +1,30 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host**: Ollama API Host URL (required: True, sensitive: False) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query ollama + provider: ollama + config: "{{ provider.my_provider_name }}" + with: + prompt: {value} + model: {value} + max_tokens: {value} + structured_output_format: {value} +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). 
diff --git a/docs/snippets/providers/openai-snippet-autogenerated.mdx b/docs/snippets/providers/openai-snippet-autogenerated.mdx new file mode 100644 index 0000000000..0cf54c56bb --- /dev/null +++ b/docs/snippets/providers/openai-snippet-autogenerated.mdx @@ -0,0 +1,37 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: OpenAI Platform API Key (required: True, sensitive: True) +- **organization_id**: OpenAI Platform Organization ID (required: False, sensitive: False) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query openai + provider: openai + config: "{{ provider.my_provider_name }}" + with: + prompt: {value} + model: {value} + max_tokens: {value} + structured_output_format: {value} +``` + + + + + +Check the following workflow examples: +- [conditionally_run_if_ai_says_so.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/conditionally_run_if_ai_says_so.yaml) +- [enrich_using_structured_output_from_openai.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/enrich_using_structured_output_from_openai.yaml) +- [gcp_logging_open_ai.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/gcp_logging_open_ai.yaml) +- [send_slack_message_on_failure.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/send_slack_message_on_failure.yaml) +- [update-incident-grafana-incident.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/update-incident-grafana-incident.yaml) diff --git a/docs/snippets/providers/openobserve-snippet-autogenerated.mdx b/docs/snippets/providers/openobserve-snippet-autogenerated.mdx new file mode 100644 index 0000000000..8924d9a1f4 --- /dev/null +++ b/docs/snippets/providers/openobserve-snippet-autogenerated.mdx @@ -0,0 +1,21 @@ +{/* 
This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **openObserveUsername**: OpenObserve Username (required: True, sensitive: False) +- **openObservePassword**: Password (required: True, sensitive: True) +- **openObserveHost**: OpenObserve host url (required: True, sensitive: False) +- **openObservePort**: OpenObserve Port (required: True, sensitive: False) +- **organisationID**: OpenObserve organisationID (required: True, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is Authorized (mandatory) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/opensearchserverless-snippet-autogenerated.mdx b/docs/snippets/providers/opensearchserverless-snippet-autogenerated.mdx new file mode 100644 index 0000000000..a0402971a2 --- /dev/null +++ b/docs/snippets/providers/opensearchserverless-snippet-autogenerated.mdx @@ -0,0 +1,55 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **domain_endpoint**: Domain endpoint (required: True, sensitive: False) +- **region**: AWS region (required: True, sensitive: False) +- **access_key**: AWS access key (required: False, sensitive: True) +- **access_key_secret**: AWS access key secret (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **iam:SimulatePrincipalPolicy**: Required to check if we have access to AOSS API. (mandatory) +- **aoss:APIAccessAll**: Required to make API calls to OpenSearch Serverless. (Add from IAM console) (mandatory) +- **aoss:ListAccessPolicies**: Required to access all Data Access Policies. (Add from IAM console) (mandatory) +- **aoss:GetAccessPolicy**: Required to check each policy for read and write scope. (Add from IAM console) (mandatory) +- **aoss:CreateIndex**: Required to create indexes while saving a doc. (mandatory) ([Documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-genref.html#serverless-operations)) +- **aoss:ReadDocument**: Required to query. (mandatory) ([Documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-genref.html#serverless-operations)) +- **aoss:WriteDocument**: Required to save documents. (mandatory) ([Documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-genref.html#serverless-operations)) + + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query opensearchserverless + provider: opensearchserverless + config: "{{ provider.my_provider_name }}" + with: + query: {value} + index: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query opensearchserverless + provider: opensearchserverless + config: "{{ provider.my_provider_name }}" + with: + index: {value} + document: {value} + doc_id: {value} +``` + + + + +Check the following workflow example: +- [opensearchserverless_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/opensearchserverless_basic.yml) diff --git a/docs/snippets/providers/openshift-snippet-autogenerated.mdx b/docs/snippets/providers/openshift-snippet-autogenerated.mdx new file mode 100644 index 0000000000..c31758e7f0 --- /dev/null +++ b/docs/snippets/providers/openshift-snippet-autogenerated.mdx @@ -0,0 +1,61 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_server**: The openshift api server url (required: True, sensitive: False) +- **token**: The openshift token (required: True, sensitive: True) +- **insecure**: Skip TLS verification (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **connect_to_openshift**: Check if the provided token can connect to the openshift server (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query openshift + provider: openshift + config: "{{ provider.my_provider_name }}" + with: + command_type: {value} # The type of query to perform. 
Supported queries are: +- get_logs: Get logs from a pod +- get_events: Get events for a namespace or pod +- get_pods: List pods in a namespace or across all namespaces +- get_node_pressure: Get node pressure conditions +- get_pvc: List persistent volume claims +- get_routes: List OpenShift routes +- get_deploymentconfigs: List OpenShift deployment configs +- get_projects: List OpenShift projects + # Additional arguments for the query. +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query openshift + provider: openshift + config: "{{ provider.my_provider_name }}" + with: + action: {value} # The action to perform. Supported actions are: +- rollout_restart: Restart a deployment, statefulset, or daemonset +- restart_pod: Restart a pod by deleting it +- scale_deployment: Scale a deployment to specified replicas +- scale_deploymentconfig: Scale a deployment config to specified replicas + # Additional arguments for the action. +``` + + + + +Check the following workflow examples: +- [openshift_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/openshift_basic.yml) +- [openshift_monitoring_and_remediation.yml](https://github.com/keephq/keep/blob/main/examples/workflows/openshift_monitoring_and_remediation.yml) +- [openshift_pod_restart.yml](https://github.com/keephq/keep/blob/main/examples/workflows/openshift_pod_restart.yml) diff --git a/docs/snippets/providers/opsgenie-snippet-autogenerated.mdx b/docs/snippets/providers/opsgenie-snippet-autogenerated.mdx new file mode 100644 index 0000000000..07d4ac8629 --- /dev/null +++ b/docs/snippets/providers/opsgenie-snippet-autogenerated.mdx @@ -0,0 +1,71 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **api_key**: OpsGenie api key (required: True, sensitive: True) +- **integration_name**: OpsGenie integration name (required: True, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **opsgenie:create**: Create OpsGenie alerts (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query opsgenie + provider: opsgenie + config: "{{ provider.my_provider_name }}" + with: + query_type: {value} + query: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query opsgenie + provider: opsgenie + config: "{{ provider.my_provider_name }}" + with: + user: {value} # Display name of the request owner + note: {value} # Additional note that will be added while creating the alert + source: {value} # Source field of the alert. Default value is IP address of the incoming request + message: {value} # Message of the alert + alias: {value} # Client-defined identifier of the alert, that is also the key element of alert deduplication + description: {value} # Description field of the alert that is generally used to provide a detailed information + responders: {value} # Responders that the alert will be routed to send notifications + visible_to: {value} # Teams and users that the alert will become visible to without sending any notification + actions: {value} # Custom actions that will be available for the alert + tags: {value} # Tags of the alert + details: {value} # Map of key-value pairs to use as custom properties of the alert + entity: {value} # Entity field of the alert that is generally used to specify which domain alert is related to + priority: {value} # Priority level of the alert + type: {value} # Type of the request, e.g. 
create_alert, close_alert + # Additional arguments +``` + + + + +Check the following workflow examples: +- [failed-to-login-workflow.yml](https://github.com/keephq/keep/blob/main/examples/workflows/failed-to-login-workflow.yml) +- [opsgenie-close-alert.yml](https://github.com/keephq/keep/blob/main/examples/workflows/opsgenie-close-alert.yml) +- [opsgenie-create-alert-cel.yml](https://github.com/keephq/keep/blob/main/examples/workflows/opsgenie-create-alert-cel.yml) +- [opsgenie-create-alert.yml](https://github.com/keephq/keep/blob/main/examples/workflows/opsgenie-create-alert.yml) +- [opsgenie_open_alerts.yml](https://github.com/keephq/keep/blob/main/examples/workflows/opsgenie_open_alerts.yml) + + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). + +- **close_alert** Close an alert (action, scopes: opsgenie:create) + +- **comment_alert** Comment an alert (action, scopes: opsgenie:create) diff --git a/docs/snippets/providers/pagerduty-snippet-autogenerated.mdx b/docs/snippets/providers/pagerduty-snippet-autogenerated.mdx new file mode 100644 index 0000000000..1f37116de7 --- /dev/null +++ b/docs/snippets/providers/pagerduty-snippet-autogenerated.mdx @@ -0,0 +1,70 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **routing_key**: Routing Key (an integration or ruleset key) (required: False, sensitive: False) +- **api_key**: Api Key (a user or team API key) (required: False, sensitive: True) +- **oauth_data**: For oauth flow (required: False, sensitive: True) +- **service_id**: Service Id (if provided, keep will only operate on this service) (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **incidents_read**: Read incidents data. (mandatory) +- **incidents_write**: Write incidents. +- **webhook_subscriptions_read**: Read webhook data. +- **webhook_subscriptions_write**: Write webhooks. + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query pagerduty + provider: pagerduty + config: "{{ provider.my_provider_name }}" + with: + incident_id: {value} + incident_key: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query pagerduty + provider: pagerduty + config: "{{ provider.my_provider_name }}" + with: + title: {value} # Title of the alert or incident + dedup: {value} # String used to deduplicate alerts for events API, max 255 chars + service_id: {value} # ID of the service for incidents + routing_key: {value} # API routing_key (optional), if not specified, fallbacks to the one provided in provider + requester: {value} # Email of the user requesting the incident creation + incident_id: {value} # Key to identify the incident. 
UUID generated if not provided + event_type: {value} # Event type for events API (trigger/acknowledge/resolve) + severity: {value} # Severity for events API (critical/error/warning/info) + source: {value} # Source field for events API + priority: {value} # Priority reference ID for incidents + status: {value} # Status for incident updates (resolved/acknowledged) + resolution: {value} # Resolution note for resolved incidents + body: {value} # Body of the incident as per https://developer.pagerduty.com/api-reference/a7d81b0e9200f-create-an-incident#request-body + kwargs: {value} # Additional event/incident fields +``` + + + + +Check the following workflow examples: +- [ifelse.yml](https://github.com/keephq/keep/blob/main/examples/workflows/ifelse.yml) +- [pagerduty.yml](https://github.com/keephq/keep/blob/main/examples/workflows/pagerduty.yml) + + +## Topology +This provider pulls [topology](/overview/servicetopology) to Keep. It could be used in [correlations](/overview/correlation-topology) +and [mapping](/overview/enrichment/mapping#mapping-with-topology-data), and as a context +for [alerts](/alerts/sidebar#7-alert-topology-view) and [incidents](/overview#17-incident-topology). \ No newline at end of file diff --git a/docs/snippets/providers/pagertree-snippet-autogenerated.mdx b/docs/snippets/providers/pagertree-snippet-autogenerated.mdx new file mode 100644 index 0000000000..212359d63d --- /dev/null +++ b/docs/snippets/providers/pagertree-snippet-autogenerated.mdx @@ -0,0 +1,41 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_token**: Your pagertree APIToken (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **authenticated**: The user can connect to the server and is authenticated using their API_Key (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query pagertree + provider: pagertree + config: "{{ provider.my_provider_name }}" + with: + title: {value} # Title of the alert. + urgency: {value} # low|medium|high|critical + incident: {value} # True if the alert is an incident + severities: {value} # SEV-1|SEV-2|SEV-3|SEV-4|SEV-5|SEV_UNKNOWN + incident_message: {value} # Message to be displayed in the incident + description: {value} # UTF-8 string of custom message for alert. Shown in incident description + status: {value} # alert status to send + destination_team_ids: {value} # destination team_ids to send alert to + destination_router_ids: {value} # destination router_ids to send alert to + destination_account_user_ids: {value} # destination account_users_ids to send alert to + # Additional parameters to be passed +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/parseable-snippet-autogenerated.mdx b/docs/snippets/providers/parseable-snippet-autogenerated.mdx new file mode 100644 index 0000000000..64d2e45784 --- /dev/null +++ b/docs/snippets/providers/parseable-snippet-autogenerated.mdx @@ -0,0 +1,52 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **parseable_server**: Parseable Frontend URL (required: True, sensitive: False) +- **username**: Parseable username (required: True, sensitive: False) +- **password**: Parseable password (required: True, sensitive: True) + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + + +## Connecting via Webhook (omnidirectional) + +This is an example of how to configure an alert to be sent to Keep using Parseable's webhook feature. Post this to https://YOUR_PARSEABLE_SERVER/api/v1/logstream/YOUR_STREAM_NAME/alert + +``` +{{ + "version": "v1", + "alerts": [ + {{ + "name": "Alert: Server side error", + "message": "server reporting status as 500", + "rule": {{ + "type": "column", + "config": {{ + "column": "status", + "operator": "=", + "value": 500, + "repeats": 2 + }} + }}, + "targets": [ + {{ + "type": "webhook", + "endpoint": "KEEP_BACKEND_URL/alerts/event/parseable", + "skip_tls_check": true, + "repeat": {{ + "interval": "10s", + "times": 5 + }}, + "headers": {{"X-API-KEY": "{api_key}"}} + }} + ] + }} + ] +}} +``` diff --git a/docs/snippets/providers/pingdom-snippet-autogenerated.mdx b/docs/snippets/providers/pingdom-snippet-autogenerated.mdx new file mode 100644 index 0000000000..75ae5b3d5b --- /dev/null +++ b/docs/snippets/providers/pingdom-snippet-autogenerated.mdx @@ -0,0 +1,29 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Pingdom API Key (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **read**: Read alerts from Pingdom. 
(mandatory) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + + +## Connecting via Webhook (omnidirectional) + +Install Keep as Pingdom webhook + 1. Go to Settings > Integrations. + 2. Click Add Integration. + 3. Enter: + Type = Webhook + Name = Keep + URL = Your Keep Backend URL + 4. Click Save Integration. + diff --git a/docs/snippets/providers/planner-snippet-autogenerated.mdx b/docs/snippets/providers/planner-snippet-autogenerated.mdx new file mode 100644 index 0000000000..3169571691 --- /dev/null +++ b/docs/snippets/providers/planner-snippet-autogenerated.mdx @@ -0,0 +1,33 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **tenant_id**: Planner Tenant ID (required: True, sensitive: True) +- **client_id**: Planner Client ID (required: True, sensitive: True) +- **client_secret**: Planner Client Secret (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. 
+ + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query planner + provider: planner + config: "{{ provider.my_provider_name }}" + with: + plan_id: {value} + title: {value} + bucket_id: {value} +``` + + + + +Check the following workflow example: +- [planner_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/planner_basic.yml) diff --git a/docs/snippets/providers/postgres-snippet-autogenerated.mdx b/docs/snippets/providers/postgres-snippet-autogenerated.mdx new file mode 100644 index 0000000000..96ba6bc664 --- /dev/null +++ b/docs/snippets/providers/postgres-snippet-autogenerated.mdx @@ -0,0 +1,54 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **username**: Postgres username (required: True, sensitive: False) +- **password**: Postgres password (required: True, sensitive: True) +- **host**: Postgres hostname (required: True, sensitive: False) +- **database**: Postgres database name (required: False, sensitive: False) +- **port**: Postgres port (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **connect_to_server**: The user can connect to the server (mandatory) + + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query postgres + provider: postgres + config: "{{ provider.my_provider_name }}" + with: + query: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query postgres + provider: postgres + config: "{{ provider.my_provider_name }}" + with: + query: {value} +``` + + + + +Check the following workflow example: +- [disk_grown_defects_rule.yml](https://github.com/keephq/keep/blob/main/examples/workflows/disk_grown_defects_rule.yml) + + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). + +- **execute_query** Query the Postgres database (view, scopes: no additional scopes) + diff --git a/docs/snippets/providers/posthog-snippet-autogenerated.mdx b/docs/snippets/providers/posthog-snippet-autogenerated.mdx new file mode 100644 index 0000000000..23ee61ac3e --- /dev/null +++ b/docs/snippets/providers/posthog-snippet-autogenerated.mdx @@ -0,0 +1,52 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: PostHog API key (required: True, sensitive: True) +- **project_id**: PostHog project ID (required: True, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **session_recording:read**: Read PostHog session recordings (mandatory) +- **session_recording_playlist:read**: Read PostHog session recording playlists +- **project:read**: Read PostHog project data (mandatory) + + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query posthog + provider: posthog + config: "{{ provider.my_provider_name }}" + with: + query_type: {value} # Type of query (e.g., "session_recording_domains", "session_recordings") + hours: {value} # Number of hours to look back + limit: {value} # Maximum number of items to fetch + # Additional arguments +``` + + + + + +Check the following workflow example: +- [posthog_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/posthog_example.yml) + + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). + +- **get_session_recording_domains** Get a list of domains from session recordings within a time period (action, scopes: session_recording:read, project:read) + + - `hours`: Number of hours to look back (default: 24) + - `limit`: Maximum number of recordings to fetch (default: 100) +- **get_session_recordings** Get session recordings within a time period (action, scopes: session_recording:read, project:read) + + - `hours`: Number of hours to look back (default: 24) + - `limit`: Maximum number of recordings to fetch (default: 100) diff --git a/docs/snippets/providers/prometheus-snippet-autogenerated.mdx b/docs/snippets/providers/prometheus-snippet-autogenerated.mdx new file mode 100644 index 0000000000..c574844c38 --- /dev/null +++ b/docs/snippets/providers/prometheus-snippet-autogenerated.mdx @@ -0,0 +1,65 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **url**: Prometheus server URL (required: True, sensitive: False) +- **username**: Prometheus username (required: False, sensitive: False) +- **password**: Prometheus password (required: False, sensitive: True) +- **verify**: Verify SSL certificates (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **connectivity**: Connectivity Test (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query prometheus + provider: prometheus + config: "{{ provider.my_provider_name }}" + with: + query: {value} +``` + + + + + +Check the following workflow examples: +- [create_service_now_ticket_upon_alerts.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_service_now_ticket_upon_alerts.yml) +- [enrich_using_structured_output_from_deepseek.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/enrich_using_structured_output_from_deepseek.yaml) +- [enrich_using_structured_output_from_openai.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/enrich_using_structured_output_from_openai.yaml) +- [enrich_using_structured_output_from_vllm_qwen.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/enrich_using_structured_output_from_vllm_qwen.yaml) +- [http_enrich.yml](https://github.com/keephq/keep/blob/main/examples/workflows/http_enrich.yml) +- [multi-condition-cel.yml](https://github.com/keephq/keep/blob/main/examples/workflows/multi-condition-cel.yml) + +## Connecting via Webhook (omnidirectional) + +This provider takes advantage of configurable webhooks available with Prometheus Alertmanager. 
Use the following template to configure AlertManager: + +``` +route: + receiver: "keep" + group_by: ['alertname'] + group_wait: 15s + group_interval: 15s + repeat_interval: 1m + continue: true + +receivers: +- name: "keep" + webhook_configs: + - url: 'KEEP_BACKEND_URL/alerts/event/prometheus' + send_resolved: true + http_config: + basic_auth: + username: api_key + password: {api_key} +``` diff --git a/docs/snippets/providers/pushover-snippet-autogenerated.mdx b/docs/snippets/providers/pushover-snippet-autogenerated.mdx new file mode 100644 index 0000000000..f485cec492 --- /dev/null +++ b/docs/snippets/providers/pushover-snippet-autogenerated.mdx @@ -0,0 +1,28 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **token**: Pushover app token (required: True, sensitive: True) +- **user_key**: Pushover user key (required: True, sensitive: False) + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query pushover + provider: pushover + config: "{{ provider.my_provider_name }}" + with: + message: {value} # The content of the message. +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/python-snippet-autogenerated.mdx b/docs/snippets/providers/python-snippet-autogenerated.mdx new file mode 100644 index 0000000000..ad966f38e4 --- /dev/null +++ b/docs/snippets/providers/python-snippet-autogenerated.mdx @@ -0,0 +1,27 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query python + provider: python + config: "{{ provider.my_provider_name }}" + with: + code: {value} + imports: {value} +``` + + + + + +Check the following workflow examples: +- [bash_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/bash_example.yml) +- [mustache-paths-example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/mustache-paths-example.yml) diff --git a/docs/snippets/providers/quickchart-snippet-autogenerated.mdx b/docs/snippets/providers/quickchart-snippet-autogenerated.mdx new file mode 100644 index 0000000000..3f26b80458 --- /dev/null +++ b/docs/snippets/providers/quickchart-snippet-autogenerated.mdx @@ -0,0 +1,29 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Quickchart API Key (required: False, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query quickchart + provider: quickchart + config: "{{ provider.my_provider_name }}" + with: + fingerprint: {value} + status: {value} + chartConfig: {value} +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/redmine-snippet-autogenerated.mdx b/docs/snippets/providers/redmine-snippet-autogenerated.mdx new file mode 100644 index 0000000000..1de17d7665 --- /dev/null +++ b/docs/snippets/providers/redmine-snippet-autogenerated.mdx @@ -0,0 +1,36 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **host**: Redmine Host (required: True, sensitive: False) +- **api_access_key**: Redmine API Access key (required: True, sensitive: True) +- **ticket_creation_url**: URL for creating new tickets (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: Authenticated with Redmine API (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query redmine + provider: redmine + config: "{{ provider.my_provider_name }}" + with: + project_id: {value} + subject: {value} + priority_id: {value} + description: {value} +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/resend-snippet-autogenerated.mdx b/docs/snippets/providers/resend-snippet-autogenerated.mdx new file mode 100644 index 0000000000..e94dda9d93 --- /dev/null +++ b/docs/snippets/providers/resend-snippet-autogenerated.mdx @@ -0,0 +1,32 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Resend API key (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. 
+ + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query resend + provider: resend + config: "{{ provider.my_provider_name }}" + with: + _from: {value} # From email address + to: {value} # To email address + subject: {value} # Email subject + html: {value} # Email body +``` + + + + +Check the following workflow example: +- [bash_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/bash_example.yml) diff --git a/docs/snippets/providers/rollbar-snippet-autogenerated.mdx b/docs/snippets/providers/rollbar-snippet-autogenerated.mdx new file mode 100644 index 0000000000..6250b8f57b --- /dev/null +++ b/docs/snippets/providers/rollbar-snippet-autogenerated.mdx @@ -0,0 +1,17 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **rollbarAccessToken**: Project Access Token (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is Authenticated + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/s3-snippet-autogenerated.mdx b/docs/snippets/providers/s3-snippet-autogenerated.mdx new file mode 100644 index 0000000000..248636b440 --- /dev/null +++ b/docs/snippets/providers/s3-snippet-autogenerated.mdx @@ -0,0 +1,31 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **access_key**: S3 Access Token (Leave empty if using IAM role at EC2) (required: False, sensitive: True) +- **secret_access_key**: S3 Secret Access Token (Leave empty if using IAM role at EC2) (required: False, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query s3 + provider: s3 + config: "{{ provider.my_provider_name }}" + with: + bucket: {value} +``` + + + + + +Check the following workflow examples: +- [consts_and_dict.yml](https://github.com/keephq/keep/blob/main/examples/workflows/consts_and_dict.yml) +- [update_workflows_from_s3.yml](https://github.com/keephq/keep/blob/main/examples/workflows/update_workflows_from_s3.yml) diff --git a/docs/snippets/providers/salesforce-snippet-autogenerated.mdx b/docs/snippets/providers/salesforce-snippet-autogenerated.mdx new file mode 100644 index 0000000000..8de8653683 --- /dev/null +++ b/docs/snippets/providers/salesforce-snippet-autogenerated.mdx @@ -0,0 +1,13 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Salesforce API key (required: True, sensitive: True) + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). 
+ + diff --git a/docs/snippets/providers/sendgrid-snippet-autogenerated.mdx b/docs/snippets/providers/sendgrid-snippet-autogenerated.mdx new file mode 100644 index 0000000000..f4a3202d7e --- /dev/null +++ b/docs/snippets/providers/sendgrid-snippet-autogenerated.mdx @@ -0,0 +1,37 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: SendGrid API key (required: True, sensitive: True) +- **from_email**: From email address (required: True, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **email.send**: Send emails using SendGrid (mandatory) ([Documentation](https://sendgrid.com/docs/API_Reference/api_v3.html)) + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query sendgrid + provider: sendgrid + config: "{{ provider.my_provider_name }}" + with: + to: {value} # To email address or list of email addresses + subject: {value} # Email subject + html: {value} # Email body +``` + + + + +Check the following workflow examples: +- [consts_and_vars.yml](https://github.com/keephq/keep/blob/main/examples/workflows/consts_and_vars.yml) +- [sendgrid_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/sendgrid_basic.yml) diff --git a/docs/snippets/providers/sentry-snippet-autogenerated.mdx b/docs/snippets/providers/sentry-snippet-autogenerated.mdx new file mode 100644 index 0000000000..dce5cc87ab --- /dev/null +++ b/docs/snippets/providers/sentry-snippet-autogenerated.mdx @@ -0,0 +1,37 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider 
requires authentication. +- **api_key**: Sentry Api Key (required: True, sensitive: True) +- **organization_slug**: Sentry organization slug (required: True, sensitive: False) +- **api_url**: Sentry API URL (required: False, sensitive: False) +- **project_slug**: Sentry project slug within the organization (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- ****: Write permission for projects in organization + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query sentry + provider: sentry + config: "{{ provider.my_provider_name }}" + with: + project: {value} # project name + time: {value} # time range, for example: 14d +``` + + + + + +Check the following workflow example: +- [create_jira_ticket_upon_alerts.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_jira_ticket_upon_alerts.yml) diff --git a/docs/snippets/providers/servicenow-snippet-autogenerated.mdx b/docs/snippets/providers/servicenow-snippet-autogenerated.mdx new file mode 100644 index 0000000000..63a38e6b57 --- /dev/null +++ b/docs/snippets/providers/servicenow-snippet-autogenerated.mdx @@ -0,0 +1,78 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **service_now_base_url**: The base URL of the ServiceNow instance (required: True, sensitive: False) +- **username**: The username of the ServiceNow user (required: True, sensitive: False) +- **password**: The password of the ServiceNow user (required: True, sensitive: True) +- **client_id**: The client ID to use OAuth 2.0 based authentication (required: False, sensitive: False) +- **client_secret**: The client secret to use OAuth 2.0 based authentication (required: False, sensitive: True) +- **ticket_creation_url**: URL for creating new tickets (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **itil**: The user can read/write tickets from the table (mandatory) ([Documentation](https://docs.servicenow.com/bundle/sandiego-platform-administration/page/administer/roles/reference/r_BaseSystemRoles.html)) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query servicenow + provider: servicenow + config: "{{ provider.my_provider_name }}" + with: + table_name: {value} # The name of the table to query. + incident_id: {value} # The incident ID to query. + sysparm_limit: {value} # The maximum number of records to return. + sysparm_offset: {value} # The offset to start from. +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query servicenow + provider: servicenow + config: "{{ provider.my_provider_name }}" + with: + table_name: {value} # The name of the table to create the ticket in. + payload: {value} # The ticket payload. + ticket_id: {value} # The ticket ID (optional to update a ticket). + fingerprint: {value} # The fingerprint of the ticket (optional to update a ticket). 
+``` + + + + +Check the following workflow examples: +- [blogpost.yml](https://github.com/keephq/keep/blob/main/examples/workflows/blogpost.yml) +- [clickhouse_multiquery.yml](https://github.com/keephq/keep/blob/main/examples/workflows/clickhouse_multiquery.yml) +- [create_service_now_ticket_upon_alerts.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_service_now_ticket_upon_alerts.yml) +- [update_service_now_tickets_status.yml](https://github.com/keephq/keep/blob/main/examples/workflows/update_service_now_tickets_status.yml) + + +## Topology +This provider pulls [topology](/overview/servicetopology) to Keep. It could be used in [correlations](/overview/correlation-topology) +and [mapping](/overview/enrichment/mapping#mapping-with-topology-data), and as a context +for [alerts](/alerts/sidebar#7-alert-topology-view) and [incidents](/overview#17-incident-topology). + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). + +- **get_incidents** Fetch all incidents from ServiceNow (view, scopes: itil) + +- **get_incident_activities** Get work notes and comments from a ServiceNow incident (view, scopes: itil) + + - `incident_id`: The incident number (e.g. INC0010001) or sys_id. + - `limit`: Maximum number of activity records to return. +- **add_incident_activity** Add a work note or comment to a ServiceNow incident (action, scopes: itil) + + - `incident_id`: The incident number (e.g. INC0010001) or sys_id. + - `content`: The text content to add. + - `activity_type`: Either 'work_notes' or 'comments'. Defaults to 'work_notes'. 
diff --git a/docs/snippets/providers/signalfx-snippet-autogenerated.mdx b/docs/snippets/providers/signalfx-snippet-autogenerated.mdx new file mode 100644 index 0000000000..d4535e9c73 --- /dev/null +++ b/docs/snippets/providers/signalfx-snippet-autogenerated.mdx @@ -0,0 +1,21 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **sf_token**: SignalFX token (required: True, sensitive: True) +- **realm**: SignalFX Realm (required: False, sensitive: False) +- **email**: SignalFX email. Required for setup webhook. (required: False, sensitive: True) +- **password**: SignalFX password. Required for setup webhook. (required: False, sensitive: True) +- **org_id**: SignalFX organization ID. Required for setup webhook. (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **API**: API authScope - read permission for SignalFx API (mandatory) ([Documentation](https://dev.splunk.com/observability/reference/api/org_tokens/latest#endpoint-create-single-token)) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/signl4-snippet-autogenerated.mdx b/docs/snippets/providers/signl4-snippet-autogenerated.mdx new file mode 100644 index 0000000000..c6395f533b --- /dev/null +++ b/docs/snippets/providers/signl4-snippet-autogenerated.mdx @@ -0,0 +1,42 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **signl4_integration_secret**: SIGNL4 integration or team secret (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **signl4:create**: Create SIGNL4 alerts (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query signl4 + provider: signl4 + config: "{{ provider.my_provider_name }}" + with: + title: {value} # Alert title. + message: {value} # Alert message. + user: {value} # User name. + s4_external_id: {value} # External ID. + s4_status: {value} # Alert status. + s4_service: {value} # Service name. + s4_location: {value} # Location. + s4_alerting_scenario: {value} # Alerting scenario. + s4_filtering: {value} # Filtering. + # Additional alert data. +``` + + + + +Check the following workflow example: +- [signl4-alerting-workflow.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/signl4-alerting-workflow.yaml) diff --git a/docs/snippets/providers/site24x7-snippet-autogenerated.mdx b/docs/snippets/providers/site24x7-snippet-autogenerated.mdx new file mode 100644 index 0000000000..b7285a9b46 --- /dev/null +++ b/docs/snippets/providers/site24x7-snippet-autogenerated.mdx @@ -0,0 +1,21 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **zohoRefreshToken**: Zoho Refresh Token (required: True, sensitive: True) +- **zohoClientId**: Zoho Client Id (required: True, sensitive: True) +- **zohoClientSecret**: Zoho Client Secret (required: True, sensitive: True) +- **zohoAccountTLD**: Zoho Account's TLD (.com | .eu | .com.cn | .in | .au | .jp) (required: True, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is Authenticated (mandatory) +- **valid_tld**: TLD is amongst the list [.com | .eu | .com.cn | .in | .com.au | .jp] (mandatory) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/slack-snippet-autogenerated.mdx b/docs/snippets/providers/slack-snippet-autogenerated.mdx new file mode 100644 index 0000000000..f7e6a15064 --- /dev/null +++ b/docs/snippets/providers/slack-snippet-autogenerated.mdx @@ -0,0 +1,66 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **webhook_url**: Slack Webhook Url (required: True, sensitive: True) +- **access_token**: For access token installation flow, use Keep UI (required: False, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query slack + provider: slack + config: "{{ provider.my_provider_name }}" + with: + message: {value} # The content of the message. + blocks: {value} # The blocks of the message. 
+ channel: {value} # The channel to send the message + slack_timestamp: {value} # The timestamp of the message to update + thread_timestamp: {value} # The timestamp of the thread to send the message + attachments: {value} # The attachments of the message. + username: {value} # The username of the message. + notification_type: {value} # The type of notification. +``` + + + + +Check the following workflow examples: +- [consts_and_vars.yml](https://github.com/keephq/keep/blob/main/examples/workflows/consts_and_vars.yml) +- [create_jira_ticket_upon_alerts.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_jira_ticket_upon_alerts.yml) +- [datadog-log-monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/datadog-log-monitor.yml) +- [db_disk_space_monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/db_disk_space_monitor.yml) +- [elastic_enrich_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/elastic_enrich_example.yml) +- [failed-to-login-workflow.yml](https://github.com/keephq/keep/blob/main/examples/workflows/failed-to-login-workflow.yml) +- [gcp_logging_open_ai.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/gcp_logging_open_ai.yaml) +- [ifelse.yml](https://github.com/keephq/keep/blob/main/examples/workflows/ifelse.yml) +- [incident-tier-escalation.yml](https://github.com/keephq/keep/blob/main/examples/workflows/incident-tier-escalation.yml) +- [new-auth0-users-monitor.yml](https://github.com/keephq/keep/blob/main/examples/workflows/new-auth0-users-monitor.yml) +- [new_github_stars.yml](https://github.com/keephq/keep/blob/main/examples/workflows/new_github_stars.yml) +- [notify-new-trello-card.yml](https://github.com/keephq/keep/blob/main/examples/workflows/notify-new-trello-card.yml) +- [openshift_monitoring_and_remediation.yml](https://github.com/keephq/keep/blob/main/examples/workflows/openshift_monitoring_and_remediation.yml) +- 
[opsgenie_open_alerts.yml](https://github.com/keephq/keep/blob/main/examples/workflows/opsgenie_open_alerts.yml) +- [permissions_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/permissions_example.yml) +- [posthog_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/posthog_example.yml) +- [query_clickhouse.yml](https://github.com/keephq/keep/blob/main/examples/workflows/query_clickhouse.yml) +- [query_victoriametrics.yml](https://github.com/keephq/keep/blob/main/examples/workflows/query_victoriametrics.yml) +- [raw_sql_query_datetime.yml](https://github.com/keephq/keep/blob/main/examples/workflows/raw_sql_query_datetime.yml) +- [send_slack_message_on_failure.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/send_slack_message_on_failure.yaml) +- [service-error-rate-monitor-datadog.yml](https://github.com/keephq/keep/blob/main/examples/workflows/service-error-rate-monitor-datadog.yml) +- [slack-message-reaction.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack-message-reaction.yml) +- [slack-workflow-trigger.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack-workflow-trigger.yml) +- [slack_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack_basic.yml) +- [slack_basic_cel.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack_basic_cel.yml) +- [slack_basic_interval.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack_basic_interval.yml) +- [slack_message_update.yml](https://github.com/keephq/keep/blob/main/examples/workflows/slack_message_update.yml) +- [workflow_only_first_time_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/workflow_only_first_time_example.yml) +- [workflow_start_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/workflow_start_example.yml) +- [zoom_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/zoom_example.yml) diff 
--git a/docs/snippets/providers/smtp-snippet-autogenerated.mdx b/docs/snippets/providers/smtp-snippet-autogenerated.mdx new file mode 100644 index 0000000000..0286ca57e8 --- /dev/null +++ b/docs/snippets/providers/smtp-snippet-autogenerated.mdx @@ -0,0 +1,43 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **smtp_server**: SMTP Server Address (required: True, sensitive: False) +- **smtp_port**: SMTP port (required: True, sensitive: False) +- **encryption**: SMTP encryption (required: True, sensitive: False) +- **smtp_username**: SMTP username (required: False, sensitive: False) +- **smtp_password**: SMTP password (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **send_email**: Send email using SMTP protocol (mandatory) + + + +## In workflows + +This provider can be used in workflows. 
+ + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query smtp + provider: smtp + config: "{{ provider.my_provider_name }}" + with: + from_email: {value} + from_name: {value} + to_email: {value} + subject: {value} + body: {value} + html: {value} +``` + + + + +Check the following workflow examples: +- [send_smtp_email.yml](https://github.com/keephq/keep/blob/main/examples/workflows/send_smtp_email.yml) +- [send_smtp_html_email.yml](https://github.com/keephq/keep/blob/main/examples/workflows/send_smtp_html_email.yml) diff --git a/docs/snippets/providers/snowflake-snippet-autogenerated.mdx b/docs/snippets/providers/snowflake-snippet-autogenerated.mdx new file mode 100644 index 0000000000..7b42223cc3 --- /dev/null +++ b/docs/snippets/providers/snowflake-snippet-autogenerated.mdx @@ -0,0 +1,30 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **user**: Snowflake user (required: True, sensitive: False) +- **account**: Snowflake account (required: True, sensitive: False) +- **pkey**: Snowflake private key (required: True, sensitive: True) +- **pkey_passphrase**: Snowflake password (required: False, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query snowflake + provider: snowflake + config: "{{ provider.my_provider_name }}" + with: + query: {value} # query to execute +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). 
diff --git a/docs/snippets/providers/splunk-snippet-autogenerated.mdx b/docs/snippets/providers/splunk-snippet-autogenerated.mdx new file mode 100644 index 0000000000..e956b1893a --- /dev/null +++ b/docs/snippets/providers/splunk-snippet-autogenerated.mdx @@ -0,0 +1,20 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Splunk API Key (required: True, sensitive: True) +- **host**: Splunk Host (default is localhost) (required: False, sensitive: False) +- **port**: Splunk Port (default is 8089) (required: False, sensitive: False) +- **verify**: Enable SSL verification (required: False, sensitive: False) +- **username**: The username connected with the API key/token provided. (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **list_all_objects**: The user can get all the alerts (mandatory) +- **edit_own_objects**: The user can edit and add webhook to saved_searches (mandatory) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/squadcast-snippet-autogenerated.mdx b/docs/snippets/providers/squadcast-snippet-autogenerated.mdx new file mode 100644 index 0000000000..5f0089196f --- /dev/null +++ b/docs/snippets/providers/squadcast-snippet-autogenerated.mdx @@ -0,0 +1,44 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **service_region**: Service region: EU/US (required: True, sensitive: False) +- **refresh_token**: Squadcast Refresh Token (required: False, sensitive: True) +- **webhook_url**: Incident webhook url (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: The user can connect to the client + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query squadcast + provider: squadcast + config: "{{ provider.my_provider_name }}" + with: + notify_type: {value} + message: {value} + description: {value} + incident_id: {value} + priority: {value} + tags: {value} + status: {value} + event_id: {value} + attachments: {value} + additional_json: {value} +``` + + + + +Check the following workflow example: +- [squadcast_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/squadcast_example.yml) diff --git a/docs/snippets/providers/ssh-snippet-autogenerated.mdx b/docs/snippets/providers/ssh-snippet-autogenerated.mdx new file mode 100644 index 0000000000..bf44716245 --- /dev/null +++ b/docs/snippets/providers/ssh-snippet-autogenerated.mdx @@ -0,0 +1,38 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host**: SSH hostname (required: True, sensitive: False) +- **user**: SSH user (required: True, sensitive: False) +- **port**: SSH port (required: False, sensitive: False) +- **pkey**: SSH private key (required: False, sensitive: True) +- **password**: SSH password (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. 
Below is a summary of relevant scopes and their use cases: +- **ssh_access**: The provided credentials grant access to the SSH server + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query ssh + provider: ssh + config: "{{ provider.my_provider_name }}" + with: + command: {value} + query: {value} # command to execute +``` + + + + + +Check the following workflow example: +- [businesshours.yml](https://github.com/keephq/keep/blob/main/examples/workflows/businesshours.yml) diff --git a/docs/snippets/providers/statuscake-snippet-autogenerated.mdx b/docs/snippets/providers/statuscake-snippet-autogenerated.mdx new file mode 100644 index 0000000000..5e07676de0 --- /dev/null +++ b/docs/snippets/providers/statuscake-snippet-autogenerated.mdx @@ -0,0 +1,17 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Statuscake API Key (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **alerts**: Read alerts from Statuscake + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). 
+ + diff --git a/docs/snippets/providers/sumologic-snippet-autogenerated.mdx b/docs/snippets/providers/sumologic-snippet-autogenerated.mdx new file mode 100644 index 0000000000..8da87b3e65 --- /dev/null +++ b/docs/snippets/providers/sumologic-snippet-autogenerated.mdx @@ -0,0 +1,20 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **sumoAccessId**: SumoLogic Access ID (required: True, sensitive: False) +- **sumoAccessKey**: SumoLogic Access Key (required: True, sensitive: True) +- **deployment**: Deployment Region (required: True, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is Authorized (mandatory) +- **authorized**: Required privileges (mandatory) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/teams-snippet-autogenerated.mdx b/docs/snippets/providers/teams-snippet-autogenerated.mdx new file mode 100644 index 0000000000..f7ca9a6d30 --- /dev/null +++ b/docs/snippets/providers/teams-snippet-autogenerated.mdx @@ -0,0 +1,38 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **webhook_url**: Teams Webhook Url (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. 
+ + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query teams + provider: teams + config: "{{ provider.my_provider_name }}" + with: + message: {value} # The message to send + typeCard: {value} # The card type. Can be "MessageCard" (legacy) or "message" (for Adaptive Cards). Default is "message" + themeColor: {value} # Hexadecimal color (only used with MessageCard type) + sections: {value} # For MessageCard: Array of custom information sections. For Adaptive Cards: Array of card elements following the Adaptive Card schema. Can be provided as a JSON string or array. + schema: {value} # Schema URL for Adaptive Cards. Default is "http://adaptivecards.io/schemas/adaptive-card.json" + attachments: {value} # Custom attachments array for Adaptive Cards (overrides default attachment structure). Can be provided as a JSON string or array. + mentions: {value} # List of user mentions to include in the Adaptive Card. Each mention should be a dict with 'id' (user ID, Microsoft Entra Object ID, or UPN) and 'name' (display name) keys. 
+Example: [{"id": "user-id-123", "name": "John Doe"}, {"id": "john.doe@example.com", "name": "John Doe"}] +``` + + + + +Check the following workflow examples: +- [create_jira_ticket_upon_alerts.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_jira_ticket_upon_alerts.yml) +- [teams-adaptive-card-notifier.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/teams-adaptive-card-notifier.yaml) +- [teams-adaptive-cards-with-mentions.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/teams-adaptive-cards-with-mentions.yaml) diff --git a/docs/snippets/providers/telegram-snippet-autogenerated.mdx b/docs/snippets/providers/telegram-snippet-autogenerated.mdx new file mode 100644 index 0000000000..97e274814c --- /dev/null +++ b/docs/snippets/providers/telegram-snippet-autogenerated.mdx @@ -0,0 +1,38 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **bot_token**: Telegram Bot Token (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. 
+ + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query telegram + provider: telegram + config: "{{ provider.my_provider_name }}" + with: + chat_id: {value} # Unique identifier for the target chat or username of the target channel + topic_id: {value} # Unique identifier for the target message thread (topic) + message: {value} # Message to be sent + reply_markup: {value} # Inline keyboard markup to be attached to the message + reply_markup_layout: {value} # Direction of the reply markup, could be "horizontal" or "vertical" + parse_mode: {value} # Mode for parsing entities in the message text, could be "markdown" or "html" + image_url: {value} # URL of the image to be attached to the message + caption_on_image: {value} # Whether to use the message as a caption for the image +``` + + + + +Check the following workflow examples: +- [send-message-telegram-with-htmlmd.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/send-message-telegram-with-htmlmd.yaml) +- [telegram_advanced.yml](https://github.com/keephq/keep/blob/main/examples/workflows/telegram_advanced.yml) +- [telegram_basic.yml](https://github.com/keephq/keep/blob/main/examples/workflows/telegram_basic.yml) diff --git a/docs/snippets/providers/test_fluxcd-snippet-autogenerated.mdx b/docs/snippets/providers/test_fluxcd-snippet-autogenerated.mdx new file mode 100644 index 0000000000..5678e0e6e2 --- /dev/null +++ b/docs/snippets/providers/test_fluxcd-snippet-autogenerated.mdx @@ -0,0 +1,9 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). 
+ + diff --git a/docs/snippets/providers/thousandeyes-snippet-autogenerated.mdx b/docs/snippets/providers/thousandeyes-snippet-autogenerated.mdx new file mode 100644 index 0000000000..16f17a54cb --- /dev/null +++ b/docs/snippets/providers/thousandeyes-snippet-autogenerated.mdx @@ -0,0 +1,17 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **oauth2_token**: OAuth2 Bearer Token (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: User is Authenticated + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/trello-snippet-autogenerated.mdx b/docs/snippets/providers/trello-snippet-autogenerated.mdx new file mode 100644 index 0000000000..2f40f9efc6 --- /dev/null +++ b/docs/snippets/providers/trello-snippet-autogenerated.mdx @@ -0,0 +1,31 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Trello API Key (required: True, sensitive: True) +- **api_token**: Trello API Token (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query trello + provider: trello + config: "{{ provider.my_provider_name }}" + with: + board_id: {value} # Trello board ID + filter: {value} # Trello action filter +``` + + + + + +Check the following workflow example: +- [notify-new-trello-card.yml](https://github.com/keephq/keep/blob/main/examples/workflows/notify-new-trello-card.yml) diff --git a/docs/snippets/providers/twilio-snippet-autogenerated.mdx b/docs/snippets/providers/twilio-snippet-autogenerated.mdx new file mode 100644 index 0000000000..46975e3ffe --- /dev/null +++ b/docs/snippets/providers/twilio-snippet-autogenerated.mdx @@ -0,0 +1,34 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **account_sid**: Twilio Account SID (required: True, sensitive: False) +- **api_token**: Twilio API Token (required: True, sensitive: True) +- **from_phone_number**: Twilio Phone Number (required: True, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **send_sms**: The API token has permission to send the SMS (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query twilio + provider: twilio + config: "{{ provider.my_provider_name }}" + with: + message_body: {value} # The content of the SMS message to be sent. Defaults to "". + to_phone_number: {value} # The recipient's phone number. Defaults to "". +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). 
diff --git a/docs/snippets/providers/uptimekuma-snippet-autogenerated.mdx b/docs/snippets/providers/uptimekuma-snippet-autogenerated.mdx new file mode 100644 index 0000000000..308ab8bf92 --- /dev/null +++ b/docs/snippets/providers/uptimekuma-snippet-autogenerated.mdx @@ -0,0 +1,19 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host_url**: UptimeKuma Host URL (required: True, sensitive: False) +- **username**: UptimeKuma Username (required: True, sensitive: False) +- **password**: UptimeKuma Password (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **alerts**: Read alerts from UptimeKuma + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/vectordev-snippet-autogenerated.mdx b/docs/snippets/providers/vectordev-snippet-autogenerated.mdx new file mode 100644 index 0000000000..455c70e454 --- /dev/null +++ b/docs/snippets/providers/vectordev-snippet-autogenerated.mdx @@ -0,0 +1,13 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: API key (required: True, sensitive: True) + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). 
+ + diff --git a/docs/snippets/providers/victorialogs-snippet-autogenerated.mdx b/docs/snippets/providers/victorialogs-snippet-autogenerated.mdx new file mode 100644 index 0000000000..fe801ca180 --- /dev/null +++ b/docs/snippets/providers/victorialogs-snippet-autogenerated.mdx @@ -0,0 +1,48 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host_url**: VictoriaLogs Host URL (required: True, sensitive: False) +- **authentication_type**: Authentication Type (required: True, sensitive: False) +- **username**: HTTP basic authentication - Username (required: False, sensitive: False) +- **password**: HTTP basic authentication - Password (required: False, sensitive: True) +- **bearer_token**: Bearer Token (required: False, sensitive: True) +- **x_scope_orgid**: X-Scope-OrgID Header (required: False, sensitive: False) +- **insecure**: Skip TLS verification (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **authenticated**: The instance is valid and the user is authenticated + + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query victorialogs + provider: victorialogs + config: "{{ provider.my_provider_name }}" + with: + queryType: {value} + query: {value} + time: {value} + start: {value} + end: {value} + step: {value} + account_id: {value} + project_id: {value} + limit: {value} + timeout: {value} +``` + + + + + +Check the following workflow example: +- [query_victorialogs.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/query_victorialogs.yaml) diff --git a/docs/snippets/providers/victoriametrics-snippet-autogenerated.mdx b/docs/snippets/providers/victoriametrics-snippet-autogenerated.mdx new file mode 100644 index 0000000000..e74757a473 --- /dev/null +++ b/docs/snippets/providers/victoriametrics-snippet-autogenerated.mdx @@ -0,0 +1,73 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **VMAlertHost**: The hostname or IP address where VMAlert is running (required: False, sensitive: False) +- **VMAlertPort**: The port number on which VMAlert is listening (required: False, sensitive: False) +- **VMAlertURL**: The full URL to the VMAlert instance. Alternative to Host/Port (required: False, sensitive: False) +- **VMBackendHost**: The hostname or IP address where VictoriaMetrics backend is running (required: False, sensitive: False) +- **VMBackendPort**: The port number on which VictoriaMetrics backend is listening (required: False, sensitive: False) +- **VMBackendURL**: The full URL to the VictoriaMetrics backend. 
Alternative to Host/Port (required: False, sensitive: False) +- **BasicAuthUsername**: Username for basic authentication (required: False, sensitive: False) +- **BasicAuthPassword**: Password for basic authentication (required: False, sensitive: True) +- **SkipValidation**: Enter 'true' to skip validation of authentication (required: False, sensitive: False) +- **insecure**: Skip TLS verification (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **connected**: The user can connect to the client (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query victoriametrics + provider: victoriametrics + config: "{{ provider.my_provider_name }}" + with: + query: {value} + start: {value} + end: {value} + step: {value} + queryType: {value} +``` + + + + + +Check the following workflow examples: +- [create_alert_from_vm_metric.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_alert_from_vm_metric.yml) +- [create_multi_alert_from_vm_metric.yml](https://github.com/keephq/keep/blob/main/examples/workflows/create_multi_alert_from_vm_metric.yml) +- [query_victoriametrics.yml](https://github.com/keephq/keep/blob/main/examples/workflows/query_victoriametrics.yml) + +## Connecting via Webhook (omnidirectional) + +This provider takes advantage of configurable webhooks available with Prometheus Alertmanager. 
Use the following template to configure AlertManager: + +``` +route: + receiver: "keep" + group_by: ['alertname'] + group_wait: 15s + group_interval: 15s + repeat_interval: 1m + continue: true + +receivers: +- name: "keep" + webhook_configs: + - url: 'KEEP_BACKEND_URL/alerts/event/victoriametrics' + send_resolved: true + http_config: + basic_auth: + username: api_key + password: {api_key} + +``` diff --git a/docs/snippets/providers/vllm-snippet-autogenerated.mdx b/docs/snippets/providers/vllm-snippet-autogenerated.mdx new file mode 100644 index 0000000000..cde7733e63 --- /dev/null +++ b/docs/snippets/providers/vllm-snippet-autogenerated.mdx @@ -0,0 +1,34 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_url**: vLLM API endpoint URL (required: True, sensitive: False) +- **api_key**: Optional API key if your vLLM deployment requires authentication (required: False, sensitive: True) + + +## In workflows + +This provider can be used in workflows. 
+ + +As "step" to query data, example: +```yaml +steps: + - name: Query vllm + provider: vllm + config: "{{ provider.my_provider_name }}" + with: + prompt: {value} + temperature: {value} + model: {value} + max_tokens: {value} + structured_output_format: {value} +``` + + + + + +Check the following workflow example: +- [enrich_using_structured_output_from_vllm_qwen.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/enrich_using_structured_output_from_vllm_qwen.yaml) diff --git a/docs/snippets/providers/wazuh-snippet-autogenerated.mdx b/docs/snippets/providers/wazuh-snippet-autogenerated.mdx new file mode 100644 index 0000000000..8e1275f2d6 --- /dev/null +++ b/docs/snippets/providers/wazuh-snippet-autogenerated.mdx @@ -0,0 +1,9 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/webhook-snippet-autogenerated.mdx b/docs/snippets/providers/webhook-snippet-autogenerated.mdx new file mode 100644 index 0000000000..b31a85b308 --- /dev/null +++ b/docs/snippets/providers/webhook-snippet-autogenerated.mdx @@ -0,0 +1,60 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **url**: Webhook URL (required: True, sensitive: False) +- **verify**: Enable SSL verification (required: False, sensitive: False) +- **method**: HTTP method (required: True, sensitive: False) +- **http_basic_authentication_username**: HTTP basic authentication - Username (required: False, sensitive: False) +- **http_basic_authentication_password**: HTTP basic authentication - Password (required: False, sensitive: True) +- **api_key**: API key (required: False, sensitive: True) +- **headers**: Headers (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **send_webhook**: (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query webhook + provider: webhook + config: "{{ provider.my_provider_name }}" + with: + url: {value} + method: {value} + http_basic_authentication_username: {value} + http_basic_authentication_password: {value} + api_key: {value} + headers: {value} + body: {value} + params: {value} + fail_on_error: {value} +``` + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query webhook + provider: webhook + config: "{{ provider.my_provider_name }}" + with: + body: {value} + params: {value} +``` + + + + +Check the following workflow examples: +- [webhook_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/webhook_example.yml) +- [webhook_example_foreach.yml](https://github.com/keephq/keep/blob/main/examples/workflows/webhook_example_foreach.yml) +- [zoom_chat_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/zoom_chat_example.yml) diff --git a/docs/snippets/providers/websocket-snippet-autogenerated.mdx b/docs/snippets/providers/websocket-snippet-autogenerated.mdx new file mode 100644 index 0000000000..ede7b81c52 --- /dev/null +++ 
b/docs/snippets/providers/websocket-snippet-autogenerated.mdx @@ -0,0 +1,25 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + + +## In workflows + +This provider can be used in workflows. + + +As "step" to query data, example: +```yaml +steps: + - name: Query websocket + provider: websocket + config: "{{ provider.my_provider_name }}" + with: + socket_url: {value} # The websocket URL to query. + timeout: {value} # Connection Timeout. Defaults to None. + data: {value} # Data to send through the websocket. Defaults to None. +``` + + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/youtrack-snippet-autogenerated.mdx b/docs/snippets/providers/youtrack-snippet-autogenerated.mdx new file mode 100644 index 0000000000..ebb2099229 --- /dev/null +++ b/docs/snippets/providers/youtrack-snippet-autogenerated.mdx @@ -0,0 +1,37 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **host_url**: YouTrack Host URL (required: True, sensitive: False) +- **project_id**: YouTrack Project ID (required: True, sensitive: False) +- **permanent_token**: YouTrack Permanent Token (required: True, sensitive: True) +- **ticket_creation_url**: URL for creating new tickets (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **create_issue**: (mandatory) + + + +## In workflows + +This provider can be used in workflows. 
+ + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query youtrack + provider: youtrack + config: "{{ provider.my_provider_name }}" + with: + summary: {value} + description: {value} +``` + + + + +Check the following workflow example: +- [create-issue-youtrack.yaml](https://github.com/keephq/keep/blob/main/examples/workflows/create-issue-youtrack.yaml) diff --git a/docs/snippets/providers/zabbix-snippet-autogenerated.mdx b/docs/snippets/providers/zabbix-snippet-autogenerated.mdx new file mode 100644 index 0000000000..dd361477af --- /dev/null +++ b/docs/snippets/providers/zabbix-snippet-autogenerated.mdx @@ -0,0 +1,68 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **zabbix_frontend_url**: Zabbix Frontend URL (required: True, sensitive: False) +- **auth_token**: Zabbix Auth Token (required: True, sensitive: True) +- **verify**: Verify SSL certificates (required: False, sensitive: False) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **action.create**: This method allows to create new actions. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/action/create)) +- **action.get**: This method allows to retrieve actions. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/action/get)) +- **event.acknowledge**: This method allows to update events. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/event/acknowledge)) +- **mediatype.create**: This method allows to create new media types. 
(mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/mediatype/create)) +- **mediatype.get**: This method allows to retrieve media types. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/mediatype/get)) +- **mediatype.update**: This method allows to update media types. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/mediatype/update)) +- **problem.get**: The method allows to retrieve problems. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/problem/get)) +- **script.create**: This method allows to create new scripts. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/script/create)) +- **script.get**: The method allows to retrieve scripts. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/script/get)) +- **script.update**: This method allows to update scripts. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/script/update)) +- **user.get**: This method allows to retrieve users. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/user/get)) +- **user.update**: This method allows to update users. (mandatory) ([Documentation](https://www.zabbix.com/documentation/current/en/manual/api/reference/user/update)) + + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + + + +## Provider Methods +The provider exposes the following [Provider Methods](/providers/provider-methods#via-ai-assistant). They are available in the [AI Assistant](/overview/ai-incident-assistant). + +- **close_problem** No description. 
(action, scopes: event.acknowledge) + + - `id`: The problem id. +- **change_severity** No description. (action, scopes: event.acknowledge) + + - `id`: The problem id. + - `new_severity`: The new severity. Can be an integer string (0-5) or severity name: +- "0" or "Not classified" +- "1" or "Information" +- "2" or "Warning" +- "3" or "Average" +- "4" or "High" +- "5" or "Disaster" +- **surrpress_problem** No description. (action, scopes: event.acknowledge) + + - `id`: The problem id. + - `suppress_until`: The datetime to suppress the problem until. +- **unsurrpress_problem** No description. (action, scopes: event.acknowledge) + + - `id`: The problem id. +- **acknowledge_problem** No description. (action, scopes: event.acknowledge) + + - `id`: The problem id. +- **unacknowledge_problem** No description. (action, scopes: event.acknowledge) + + - `id`: The problem id. +- **add_message_to_problem** No description. (action, scopes: event.acknowledge) + + - `id`: The problem id. + - `message_text`: The message text. +- **get_problem_messages** No description. (view, scopes: problem.get) + + - `id`: The problem id. diff --git a/docs/snippets/providers/zendesk-snippet-autogenerated.mdx b/docs/snippets/providers/zendesk-snippet-autogenerated.mdx new file mode 100644 index 0000000000..3d288fb519 --- /dev/null +++ b/docs/snippets/providers/zendesk-snippet-autogenerated.mdx @@ -0,0 +1,15 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Zendesk API key (required: True, sensitive: True) +- **zendesk_domain**: Zendesk domain (required: True, sensitive: False) +- **ticket_creation_url**: URL for creating new tickets (required: False, sensitive: False) + + +## In workflows + +This provider can't be used as a "step" or "action" in workflows. 
If you want to use it, please let us know by creating an issue in the [GitHub repository](https://github.com/keephq/keep/issues). + + diff --git a/docs/snippets/providers/zenduty-snippet-autogenerated.mdx b/docs/snippets/providers/zenduty-snippet-autogenerated.mdx new file mode 100644 index 0000000000..5951aea977 --- /dev/null +++ b/docs/snippets/providers/zenduty-snippet-autogenerated.mdx @@ -0,0 +1,31 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. +- **api_key**: Zenduty api key (required: True, sensitive: True) + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query zenduty + provider: zenduty + config: "{{ provider.my_provider_name }}" + with: + title: {value} # Title of the incident + summary: {value} # Summary of the incident + service: {value} # Service ID in Zenduty + user: {value} # User ID in Zenduty + policy: {value} # Policy ID in Zenduty +``` + + + +If you need workflow examples with this provider, please raise a [GitHub issue](https://github.com/keephq/keep/issues). diff --git a/docs/snippets/providers/zoom-snippet-autogenerated.mdx b/docs/snippets/providers/zoom-snippet-autogenerated.mdx new file mode 100644 index 0000000000..11e71e6876 --- /dev/null +++ b/docs/snippets/providers/zoom-snippet-autogenerated.mdx @@ -0,0 +1,41 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **account_id**: Zoom Account ID (required: True, sensitive: True) +- **client_id**: Zoom Client ID (required: True, sensitive: True) +- **client_secret**: Zoom Client Secret (required: True, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **create_meeting**: Create a new Zoom meeting (mandatory) + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query zoom + provider: zoom + config: "{{ provider.my_provider_name }}" + with: + topic: {value} + start_time: {value} + duration: {value} + timezone: {value} + record_meeting: {value} + host_email: {value} +``` + + + + +Check the following workflow examples: +- [zoom_chat_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/zoom_chat_example.yml) +- [zoom_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/zoom_example.yml) diff --git a/docs/snippets/providers/zoom_chat-snippet-autogenerated.mdx b/docs/snippets/providers/zoom_chat-snippet-autogenerated.mdx new file mode 100644 index 0000000000..497a542be1 --- /dev/null +++ b/docs/snippets/providers/zoom_chat-snippet-autogenerated.mdx @@ -0,0 +1,42 @@ +{/* This snippet is automatically generated using scripts/docs_render_provider_snippets.py +Do not edit it manually, as it will be overwritten */} + +## Authentication +This provider requires authentication. 
+- **webhook_url**: Zoom Incoming Webhook Full Format Url (required: True, sensitive: True) +- **authorization_token**: Incoming Webhook Authorization Token (required: True, sensitive: True) +- **account_id**: Zoom Account ID (required: False, sensitive: True) +- **client_id**: Zoom Client ID (required: False, sensitive: True) +- **client_secret**: Zoom Client Secret (required: False, sensitive: True) + +Certain scopes may be required to perform specific actions or queries via the provider. Below is a summary of relevant scopes and their use cases: +- **user:read:user:admin**: View a Zoom user's details +- **user:read:list_users:admin**: List Zoom users + + + +## In workflows + +This provider can be used in workflows. + + + +As "action" to make changes or update data, example: +```yaml +actions: + - name: Query zoom_chat + provider: zoom_chat + config: "{{ provider.my_provider_name }}" + with: + severity: {value} # The severity of the alert. + title: {value} # The title to use for the message. (optional) + message: {value} # The text message to send. Supports Markdown formatting. + tagged_users: {value} # A list of Zoom user email addresses to tag. (optional) + details_url: {value} # A URL linking to more information. (optional) +``` + + + + +Check the following workflow example: +- [zoom_chat_example.yml](https://github.com/keephq/keep/blob/main/examples/workflows/zoom_chat_example.yml) diff --git a/docs/workflows/conditions/assert.mdx b/docs/workflows/conditions/assert.mdx deleted file mode 100644 index 820cb1c81b..0000000000 --- a/docs/workflows/conditions/assert.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -sidebarTitle: "Assert" ---- - -### The assert condition implements the "python assert" behaviour - -```yaml -- type: assert - name: REQUIRED. Must be unique among the list. - assert: REQUIRED. The assert expression to evaluate. 
-``` - -### Example - -```yaml -condition: - - type: assert - name: assert-condition - assert: "{{ steps.service-is-up.results.status_code }} == 200" -``` - -- If `steps.service-is-up.results.status_code` step returns 200 => `assert 200 == 200` => the conditions returns _False_ (since the assert pass) -- If `steps.service-is-up.results.status_code` step returns 404 => `assert 404 == 200` => the conditions returns _True_ (since the assert fails) diff --git a/docs/workflows/conditions/stddev.mdx b/docs/workflows/conditions/stddev.mdx deleted file mode 100644 index d61a69da1c..0000000000 --- a/docs/workflows/conditions/stddev.mdx +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: "🎯 Stddev (Standard Deviation)" -sidebarTitle: "stddev" -description: "The 'stddev' condition implements standard deviation logic. It takes a list or a list of lists, along with a standard deviation threshold ('compare_to'), and returns all values that are farther away from the mean than the standard deviation." ---- - -```yaml -- type: stddev - name: REQUIRED. Must be unique among the list. - value: REQUIRED. The input of the standard deviation algo. - pivot_column: - OPTIONAL. Integer. If supplied, any item of `value` is threatened as - a list, and the `pivot_column` is extracted from any item. - For example, if pivot_column is 1, then the second column - (zero-based) of every item of the value list is used for - the calculation (see example for more details) - compare_to: REQUIRED. Integer. The standard deviation to compare against. -``` - -### Example - -```yaml -condition: - - name: stddev-condition - type: stddev - value: "{{ steps.db-step.results }}" - pivot_column: 2 - compare_to: 1 -``` - -For this example, the output of `db-step` step is a list of rows from the db: - -`[(1, 2 ,3), (1, 4, 5), (7, 8, 9)]` - -The `pivot_column` is 2, hence the values for the stddev calculation are: -`3`, `5` and `9`. 
- -Next, the sttdev condition calculates the stddev: - -```math -standard deviation = sqrt(sum((x - mean)^2) / N) -mean = (3 + 5 + 9) / 3 = 5.666666666666667 -``` - -And the standard deviation (sd) is: - -``` -standard deviation = sqrt(((3-5.666666666666667)^2 + (5-5.666666666666667)^2 + (9-5.666666666666667)^2) / 3) - = sqrt((9.555555555555557 + 0.11111111111111116 + 9.555555555555557) / 3) - = sqrt(6.740740740740742) - = 2.5961484292674155 -``` - -Therefore, the standard deviation of the dataset [3, 5, 9] is approximately 2.596. - -Thus, the values that are more than 1 standard deviation from the mean are 3 and 9, since they are outside the range of 5.666666666666667+-2.5961484292674155 (which is [3.0705, 8.2628]). - -### Same example without pivot_column - -Notice that we used `pivot_column` since the output db `db-step` was a list of rows. -If the output was just list, we could skip it. - -For example, if the output of `db-step` was `(3, 5 ,9)`, we could just use: - -```yaml -condition: - - name: stddev-condition - type: stddev - value: "{{ steps.db-step.results }}" - compare_to: 1 -``` diff --git a/docs/workflows/conditions/threshold.mdx b/docs/workflows/conditions/threshold.mdx deleted file mode 100644 index 870ca40bf2..0000000000 --- a/docs/workflows/conditions/threshold.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "🎯 Threshold" -sidebarTitle: "Threshold" ---- - -### The threshold condition compare between two values, and returns positive (True) if applied. - -```yaml -- type: threshold - name: REQUIRED. Must be unique among the list. - value: REQUIRED. Left side of the comparison. - compare_to: REQUIRED. Right side of the comparison. - compare_type: OPTIONAL ("lt" or "gt". 
default is "gt") -``` - -### Example - -```yaml -condition: - - type: threshold - name: threshold-condition - value: "{{ steps.db-no-space.results }}" - compare_to: 10 -``` - -- If `db-no-space` step returns 11 => `value` > 10 => the conditions returns _True_ -- If `db-no-space` step returns 9.6 => `value` < 10 => the conditions returns _False_ diff --git a/docs/workflows/conditions/what-is-a-condition.mdx b/docs/workflows/conditions/what-is-a-condition.mdx deleted file mode 100644 index 9821de8c7c..0000000000 --- a/docs/workflows/conditions/what-is-a-condition.mdx +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "❓ What is a Condition" -sidebarTitle: "What is a Condition?" ---- - -Generally speaking, a condition is: - -> A predefined rule that defines when an action should be run. - -In Keep's context, a condition is a predefined rule that decide if an action should be triggered or not. - -Each condition has its own inputs/output. -The current supported conditions: - -1. [Threshold](/core/conditions/what-is-a-condition) -2. [Assert](/core/conditions/assert) -3. [Stddev](/core/conditions/stddev) diff --git a/docs/workflows/examples/autosupress.mdx b/docs/workflows/examples/autosupress.mdx new file mode 100644 index 0000000000..88bce43240 --- /dev/null +++ b/docs/workflows/examples/autosupress.mdx @@ -0,0 +1,33 @@ +--- +title: "Suppressing Alerts Automatically" +--- + + + +Link to the [workflow](https://github.com/keephq/keep/blob/main/examples/workflows/autosupress.yml). + + + +This workflow demonstrates how to suppress alerts by marking them as dismissed. + + +Explanation: +- Trigger: Activated by any alert. +- Action: Enrich the alert by adding a `dismissed` field with the value `true`. 
+ + +```yaml +workflow: + id: autosupress + description: demonstrates how to automatically suppress alerts + triggers: + - type: alert + actions: + - name: dismiss-alert + provider: + type: mock + with: + enrich_alert: + - key: dismissed + value: "true" +``` diff --git a/docs/workflows/examples/buisnesshours.mdx b/docs/workflows/examples/buisnesshours.mdx new file mode 100644 index 0000000000..195e262d6c --- /dev/null +++ b/docs/workflows/examples/buisnesshours.mdx @@ -0,0 +1,36 @@ +--- +title: "Executing Actions During Business Hours" +--- + + + +Link to the [workflow](https://github.com/keephq/keep/blob/main/examples/workflows/businesshours.yml). + + + +This workflow demonstrates how to take actions only during specified business hours. + + +Explanation: +- Trigger: Activated by an alert or manually. +- Action: Check if the current time falls within business hours in the `America/New_York` timezone. If yes, enrich the alert with a `businesshours` field set to `true`. + + +```yaml +workflow: + id: businesshours + description: demonstrate how to do smth only when it's business hours + triggers: + - type: alert + - type: manual + actions: + - name: dismiss-alert + if: "keep.is_business_hours(timezone='America/New_York')" + provider: + type: mock + with: + enrich_alert: + - key: businesshours + value: "true" + +``` diff --git a/docs/workflows/examples/create-servicenow-tickets.mdx b/docs/workflows/examples/create-servicenow-tickets.mdx new file mode 100644 index 0000000000..5c7d47f20e --- /dev/null +++ b/docs/workflows/examples/create-servicenow-tickets.mdx @@ -0,0 +1,47 @@ +--- +title: "Creating ServiceNow Tickets for Alerts" +--- + + + +Link to the [workflow](https://github.com/keephq/keep/blob/main/examples/workflows/create_service_now_ticket_upon_alerts.yml). + + + +This workflow creates a ServiceNow ticket whenever an alert from Grafana or Prometheus is triggered. + + +Explanation: +- Trigger: Activated by alerts from Grafana or Prometheus. 
+- Action: If the alert does not already have a ticket ID, create a ServiceNow ticket and enrich the alert with details like ticket ID, URL, and status. + + +```yaml +workflow: + id: servicenow + description: create a ticket in servicenow when an alert is triggered + triggers: + - type: alert + cel: source.contains("grafana") || source.contains("prometheus") + actions: + - name: create-service-now-ticket + if: "not '{{ alert.ticket_id }}' and {{ alert.annotations.ticket_type }}" + provider: + type: servicenow + config: "{{ providers.servicenow }}" + with: + table_name: "{{ alert.annotations.ticket_type }}" + payload: + short_description: "{{ alert.name }} - {{ alert.description }} [created by Keep][fingerprint: {{alert.fingerprint}}]" + description: "{{ alert.description }}" + enrich_alert: + - key: ticket_type + value: servicenow + - key: ticket_id + value: results.sys_id + - key: ticket_url + value: results.link + - key: ticket_status + value: results.stage + +``` diff --git a/docs/workflows/examples/highsev.mdx b/docs/workflows/examples/highsev.mdx new file mode 100644 index 0000000000..6916cc3d16 --- /dev/null +++ b/docs/workflows/examples/highsev.mdx @@ -0,0 +1,64 @@ +--- +title: "Handling High-Severity Sentry Alerts" +--- + + + +Link to the [workflow](https://github.com/keephq/keep/blob/main/examples/workflows/create_jira_ticket_upon_alerts.yml). + + + +This workflow handles critical alerts from Sentry based on the service they are associated with. + + + + +Explanation: +- Trigger: Activated by critical alerts from Sentry. +- Actions: +- - Send a Slack message to the payments team for alerts related to the `payments` service. +- - Create a Jira ticket for alerts related to the `ftp` service if a ticket ID is not already present. 
+ + + +```yaml +workflow: + id: sentry-alerts + description: handle alerts + triggers: + - type: alert + cel: source.contains("sentry") && severity == "critical" && (service == "payments" || service == "ftp") + actions: + - name: send-slack-message-team-payments + if: "'{{ alert.service }}' == 'payments'" + provider: + type: slack + config: "{{ providers.team-payments-slack }}" + with: + message: | + "A new alert from Sentry: Alert: {{ alert.name }} - {{ alert.description }} + {{ alert }}" + - name: create-jira-ticket-oncall-board + if: "'{{ alert.service }}' == 'ftp' and not '{{ alert.ticket_id }}'" + provider: + type: jira + config: "{{ providers.jira }}" + with: + board_name: "Oncall Board" + custom_fields: + customfield_10201: "Critical" + issuetype: "Task" + summary: "{{ alert.name }} - {{ alert.description }} (created by Keep)" + description: | + "This ticket was created by Keep. + Please check the alert details below: + {code:json} {{ alert }} {code}" + enrich_alert: + - key: ticket_type + value: jira + - key: ticket_id + value: results.issue.key + - key: ticket_url + value: results.ticket_url + +``` diff --git a/docs/workflows/examples/multi-step-alert.mdx b/docs/workflows/examples/multi-step-alert.mdx deleted file mode 100644 index 7469ade6d6..0000000000 --- a/docs/workflows/examples/multi-step-alert.mdx +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: "Multiple steps alert example" -sidebarTitle: "Multi-Step Alert" -description: "Breakdown of the alert and further explanations can be found in the bottom of this page." ---- - -```yaml -# Check both databases prod1 and prod2 and alert if any of them has less than 10% disk space left. 
-alert: - id: db-disk-space - description: Check that the DB has enough disk space - steps: - - name: db-prod1-no-space - provider: - type: mock - config: "{{ providers.db-server-mock }}" - with: - command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space - command_output: 91% # Mock - - name: db-prod2-no-space - provider: - type: mock - config: "{{ providers.db-server-mock }}" - with: - command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space - command_output: 94.5% # Mock - actions: - - name: trigger-telegram - condition: - - type: threshold - value: "{{ steps.db-prod1-no-space.results }}" - compare_to: 90% # Trigger if more than 90% full - alias: A - - type: threshold - value: "{{ steps.db-prod2-no-space.results }}" - compare_to: 90% # Trigger if more than 90% full - alias: B - # trigger the action if any of the conditions are met: - if: "{{ A }} or {{ B }}" - provider: - type: telegram - config: - authentication: - bot_token: "{{ env.TELEGRAM_BOT_TOKEN }}" - with: - chat_id: "{{ env.TELEGRAM_CHAT_ID }}" - message: Keep Alert Test - -providers: - db-server-mock: - description: Paper DB Server - authentication: -``` - -## Breakdown - -### Steps - -In this example we can see two steps: - -- db-prod1-no-space - checks db space of db prod1 -- db-prod2-no-space - checkd db space of db prod2 - -### Conditions - -The action has two threshold conditions: - -``` -condition: - - type: threshold - value: "{{ steps.this.results }}" - compare_to: 90% # Trigger if more than 90% full -``` - -But now we've added an `alias` to each condition, so it'll be easier to check it in the `action` itself. - -### Action (if statement) - -The action now uses the `if` statement to alert if **one** of the databases has less than 10% disk space left. - -We can use `if: "{{ A }} and {{ B }}"` to alert only if both databases has less than 10% disk space left. 
-_Note that its the default behavior so you may achieve the same without specifying `if` statement._ diff --git a/docs/workflows/examples/reusable-action-alert.mdx b/docs/workflows/examples/reusable-action-alert.mdx deleted file mode 100644 index 3833c84fa3..0000000000 --- a/docs/workflows/examples/reusable-action-alert.mdx +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: "Reusable Actions For Alert" -sidebarTitle: "Reusable actions For Alert" -description: "This example shows you how to check both database `prod1`, `prod2` and determines if any of them hit `90%` threshold of disk space then using an action template to send notification to two telegram channels." ---- - -Here is the full configurations: - -```yaml -# Check both databases prod1 and prod2 and alert if any of them has less than 10% disk space left. -alert: - id: db-disk-space - description: Check that the DB has enough disk space - steps: - - name: db-prod1-no-space - provider: - type: mock - config: "{{ providers.db-server-mock }}" - with: - command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space - command_output: 91% # Mock - - name: db-prod2-no-space - provider: - type: mock - config: "{{ providers.db-server-mock }}" - with: - command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space - command_output: 94.5% # Mock - actions: - - name: trigger-telegram1 - use: @trigger-telegrame - provider: - config: - authentication: - bot_token: "{{ env.TELEGRAM_BOT_TOKEN1 }}" - with: - chat_id: "{{ env.TELEGRAM_CHAT_ID1 }}" - - name: trigger-telegram2 - use: @trigger-telegrame - provider: - config: - authentication: - bot_token: "{{ env.TELEGRAM_BOT_TOKEN2 }}" - with: - chat_id: "{{ env.TELEGRAM_CHAT_ID2 }}" - -actions: - - name: trigger-telegram - use: @trigger-telegram - condition: - - type: threshold - value: "{{ steps.db-prod1-no-space.results }}" - compare_to: 90% # Trigger if more than 90% full - alias: A - - type: threshold - value: "{{ 
steps.db-prod2-no-space.results }}" - compare_to: 90% # Trigger if more than 90% full - alias: B - # trigger the action if any of the conditions are met: - if: "{{ A }} or {{ B }}" - provider: - type: telegram - with: - message: Keep Alert Test - -providers: - db-server-mock: - description: Paper DB Server - authentication: -``` - -## Breakdown - -### Steps - -In this example we can see two steps: - -- db-prod1-no-space - checks db space of db prod1 -- db-prod2-no-space - checkd db space of db prod2 - -### Conditions - -The action has two threshold conditions: - -``` -condition: - - type: threshold - value: "{{ steps.this.results }}" - compare_to: 90% # Trigger if more than 90% full -``` - -But now we've added an `alias` to each condition, so it'll be easier to check it in the `action` itself. - -### Action - -The action template is defined as. - -``` -actions: - - name: trigger-telegram - use: @trigger-telegram - condition: - - type: threshold - value: "{{ steps.db-prod1-no-space.results }}" - compare_to: 90% # Trigger if more than 90% full - alias: A - - type: threshold - value: "{{ steps.db-prod2-no-space.results }}" - compare_to: 90% # Trigger if more than 90% full - alias: B - # trigger the action if any of the conditions are met: - if: "{{ A }} or {{ B }}" - provider: - type: telegram - with: - message: Keep Alert Test -``` - -The action uses the `if` statement to alert if **one** of the databases has less than 10% disk space left. -Note that we don't define any telegram `chat_id` and `bot_token` here because we want to define two separate telegram credentials for the two channels. - -The credentials are defined in `actions` definitions within the `alert` configuration. -Note that we declare `use: @trigger-telegram` to use the defined action template. - -``` -alert: - ... 
- actions: - - name: trigger-telegram1 - use: @trigger-telegram - provider: - config: - authentication: - bot_token: "{{ env.TELEGRAM_BOT_TOKEN1 }}" - with: - chat_id: "{{ env.TELEGRAM_CHAT_ID1 }}" - - name: trigger-telegram2 - use: @trigger-telegram - provider: - config: - authentication: - bot_token: "{{ env.TELEGRAM_BOT_TOKEN2 }}" - with: - chat_id: "{{ env.TELEGRAM_CHAT_ID2 }}" -``` diff --git a/docs/workflows/examples/update-servicenow-tickets.mdx b/docs/workflows/examples/update-servicenow-tickets.mdx new file mode 100644 index 0000000000..e4587ca427 --- /dev/null +++ b/docs/workflows/examples/update-servicenow-tickets.mdx @@ -0,0 +1,44 @@ +--- +title: "Update ServiceNow Tickets" +--- + + + +Link to the [workflow](https://github.com/keephq/keep/blob/main/examples/workflows/update_service_now_tickets_status.yml). + + + +This example demonstrates how to periodically update the status of ServiceNow tickets associated with alerts. + +Explanation: +- Trigger: The workflow can be triggered manually, simulating the scheduled execution. +- Step 1: Fetch all alerts with a `ticket_type` of `servicenow` using the Keep provider. +- Action: Iterate over the fetched alerts and update their associated ServiceNow tickets with the latest status. 
+ + +```yaml +workflow: + id: servicenow + description: update the ticket status every minute + triggers: + - type: manual + steps: + - name: get-alerts + provider: + type: keep + with: + cel: ticket_type == "servicenow" + actions: + - name: update-ticket + foreach: "{{ steps.get-alerts.results }}" + provider: + type: servicenow + config: "{{ providers.servicenow }}" + with: + ticket_id: "{{ foreach.value.alert_enrichment.enrichments.ticket_id }}" + table_name: "{{ foreach.value.alert_enrichment.enrichments.table_name }}" + fingerprint: "{{ foreach.value.alert_fingerprint }}" + enrich_alert: + - key: ticket_status + value: results.state +``` diff --git a/docs/workflows/functions/add-time-to-date.mdx b/docs/workflows/functions/add-time-to-date.mdx deleted file mode 100644 index bcd1e1fe11..0000000000 --- a/docs/workflows/functions/add-time-to-date.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "add_time_to_date(date, date_format, time_str)" -sidebarTitle: "add_time_to_date" ---- - -### Description -Adds a specified amount of time to a given date. - -### Input -- `date` (str or datetime): The date to which the time will be added. Can be a string or a datetime object. -- `date_format` (str): The format of the date string if the date is provided as a string. -- `time_str` (str): The time to add, specified as a string (e.g., '1w 2d 3h 30m'). - -### Output -A `datetime` object representing the new date with the added time. 
- -### Example -```yaml -workflow: - id: datadog-alerts - description: handle alerts - triggers: - - type: alert - filters: - - key: source - value: datadog - actions: - - name: set-reminder-date - provider: - type: console - with: - alert_message: keep.add_time_to_date("{{ alert.date }}", "%Y-%m-%dT%H:%M:%S.%f%z", "1w 2d 3h 30m") diff --git a/docs/workflows/functions/all.mdx b/docs/workflows/functions/all.mdx deleted file mode 100644 index 392a747ed3..0000000000 --- a/docs/workflows/functions/all.mdx +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "all(iterable)" -sidebarTitle: "all" ---- - -### Input - -An iterable. - -### Output - -True if all items are identical, False otherwise. - -### Example - -```yaml -actions: - - name: trigger-slack - if: "keep.all({{ steps.db-step.results }})" - provider: - type: slack - config: " {{ providers.slack-demo }} " - with: - message: "Items are equal" -``` diff --git a/docs/workflows/functions/datetime-compare.mdx b/docs/workflows/functions/datetime-compare.mdx deleted file mode 100644 index d2c1db3c15..0000000000 --- a/docs/workflows/functions/datetime-compare.mdx +++ /dev/null @@ -1,28 +0,0 @@ -actions: - - name: trigger-slack - condition: - - type: threshold - # datetime_compare(t1, t2) compares t1-t2 and returns the diff in seconds - # utcnow() returns the local machine datetime in UTC - # to_utc() converts a datetime to UTC - value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}")) - compare_to: 3600 # seconds (1 hour) - compare_type: gt # greater than - - -from datetime import datetime - -def datetime_compare(t1: datetime, t2: datetime) -> int: - """ - Compares two datetime objects and returns the time difference in seconds. 
- - :param t1: First datetime object - :param t2: Second datetime object - :return: Time difference in seconds - """ - return int((t1 - t2).total_seconds()) - -# Example usage: -# t1 = datetime.utcnow() -# t2 = datetime.utcnow() - timedelta(hours=2) -# print(datetime_compare(t1, t2)) # Should return 7200 (2 hours * 3600 seconds) diff --git a/docs/workflows/functions/diff.mdx b/docs/workflows/functions/diff.mdx deleted file mode 100644 index 4abeb6c9b2..0000000000 --- a/docs/workflows/functions/diff.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "diff(iterable)" -sidebarTitle: "diff" ---- - -### Input -An iterable. - -### Output -Opposite of [`all`](all) - returns False if all items are identical, else True - -### Example -```yaml -actions: -- name: trigger-slack - if: "keep.diff({{ steps.db-step.results }})" - provider: - type: slack - config: " {{ providers.slack-demo }} " - with: - message: "Items are equal" -``` diff --git a/docs/workflows/functions/encode.mdx b/docs/workflows/functions/encode.mdx deleted file mode 100644 index e6b0ae8f25..0000000000 --- a/docs/workflows/functions/encode.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "encode(string)" -sidebarTitle: "encode" ---- - -### Input - -string - string - -### Output - -string - URL encoded string - -### Example - -```yaml -actions: - - name: trigger-slack - condition: - - type: equals - value: keep.encode('abc def') - compare_to: "abc%20def" - compare_type: eq -``` diff --git a/docs/workflows/functions/first.mdx b/docs/workflows/functions/first.mdx deleted file mode 100644 index 2be9c7b878..0000000000 --- a/docs/workflows/functions/first.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "first(iterable)" -sidebarTitle: "first" ---- - -### Input - -An iterable. - -### Output - -The first item of the iterable. 
- -### Example - -```yaml -actions: - - name: keep-slack - foreach: "{{steps.this.results}}" - condition: - - type: threshold - value: "keep.first(keep.split({{ foreach.value }}, ' '))" - # each line looks like: - # ' 64 2023-02-09 20:08:16,773 INFO: uvicorn.access -: 127.0.0.1:53948 - "GET /test2 HTTP/1.1" 503 Service Unavailable' - # where the "64" is the number of the - compare_to: 70 -``` diff --git a/docs/workflows/functions/last.mdx b/docs/workflows/functions/last.mdx deleted file mode 100644 index a3ec04cee0..0000000000 --- a/docs/workflows/functions/last.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "last(iterable)" -sidebarTitle: "last" ---- - -### Input - -An iterable. - -### Output - -The last item of the iterable. - -### Example - -```yaml -actions: - - name: keep-slack - foreach: "{{steps.this.results}}" - condition: - - type: threshold - value: "keep.last(keep.split({{ foreach.value }}, ' '))" - # each line looks like: - # '2023-02-09 20:08:16,773 INFO: uvicorn.access -: 127.0.0.1:53948 - "GET /test2 HTTP/1.1" 503' - # where the "503" is the number of the - compare_to: 200 -``` diff --git a/docs/workflows/functions/len.mdx b/docs/workflows/functions/len.mdx deleted file mode 100644 index 44b0fe4eb7..0000000000 --- a/docs/workflows/functions/len.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "len(iterable)" -sidebarTitle: "len" ---- - -### Input - -An iterable. - -### Output - -Integer. The length of the iterable. - -### Example - -```yaml -condition: - - type: threshold - value: "keep.len({{ steps.db-no-space.results }})" - compare_to: 10 -``` diff --git a/docs/workflows/functions/lowercase.mdx b/docs/workflows/functions/lowercase.mdx deleted file mode 100644 index e945f4b624..0000000000 --- a/docs/workflows/functions/lowercase.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "string(string)" -sidebarTitle: "lowercase" ---- - -### Input - -A string. - -### Output - -Returns the string which is lowercased. 
- -### Example - -```yaml -actions: - - name: trigger-slack - condition: - - type: equals - value: keep.lowercase('ABC DEF') - compare_to: "abc def" - compare_type: eq -``` diff --git a/docs/workflows/functions/split.mdx b/docs/workflows/functions/split.mdx deleted file mode 100644 index 56476b7e56..0000000000 --- a/docs/workflows/functions/split.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "string(string, delimeter)" -sidebarTitle: "split" ---- - -### Input - -A string and delimeter. - -### Output - -Returns the string, splitted by the delimeter. - -### Example - -```yaml -actions: - - name: keep-slack - foreach: "{{steps.this.results}}" - condition: - - type: threshold - value: "keep.first(keep.split({{ foreach.value }}, ' '))" - # each line looks like: - # ' 64 2023-02-09 20:08:16,773 INFO: uvicorn.access -: 127.0.0.1:53948 - "GET /test2 HTTP/1.1" 503 Service Unavailable' - # where the "64" is the number of the - compare_to: 70 -``` diff --git a/docs/workflows/functions/to-utc.mdx b/docs/workflows/functions/to-utc.mdx deleted file mode 100644 index 930de0dfb4..0000000000 --- a/docs/workflows/functions/to-utc.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "to_utc" ---- - -### Input - -datetime.datetime | str - -### Output - -datetime.datetime - utc converted - -### Example - -```yaml -actions: - - name: trigger-slack - condition: - - type: threshold - # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours - # utcnow() returns the local machine datetime in UTC - # to_utc() converts a datetime to UTC - value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}")) - compare_to: 1 # hours - compare_type: gt # greater than -``` diff --git a/docs/workflows/functions/uppercase.mdx b/docs/workflows/functions/uppercase.mdx deleted file mode 100644 index 45f3f66727..0000000000 --- a/docs/workflows/functions/uppercase.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "string(string)" -sidebarTitle: "uppercase" ---- - -### 
Input - -A string. - -### Output - -Returns the string which is uppercased. - -### Example - -```yaml -actions: - - name: trigger-slack - condition: - - type: equals - value: keep.uppercase('abc def') - compare_to: "ABC DEF" - compare_type: eq -``` diff --git a/docs/workflows/functions/utcnow.mdx b/docs/workflows/functions/utcnow.mdx deleted file mode 100644 index 0cf0900dc1..0000000000 --- a/docs/workflows/functions/utcnow.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "utcnow" -sidebarTitle: "utcnow" ---- - -### Input - -N/A - -### Output - -datetime.datetime object represents utcnow - -### Example - -```yaml -actions: - - name: trigger-slack - condition: - - type: threshold - # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours - # utcnow() returns the local machine datetime in UTC - # to_utc() converts a datetime to UTC - value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}")) - compare_to: 1 # hours - compare_type: gt # greater than -``` diff --git a/docs/workflows/functions/what-is-a-function.mdx b/docs/workflows/functions/what-is-a-function.mdx deleted file mode 100644 index 24d2269297..0000000000 --- a/docs/workflows/functions/what-is-a-function.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "What is a Function?" -description: "In Keep's context, functions extend the power of context injection. For example, if a step returns a list, you can use the `keep.len` function to count and use the number of results instead of the actual results." ---- - - - To use a keep function, prefix it with `keep.`, for example, use `keep.len` - and not `len` - - -```yaml -condition: - - type: threshold - # Use the len of the results instead of the results - value: "keep.len({{ steps.db-no-space.results }})" - compare_to: 10 -``` - -## How to create a new function? - -Keep functions are designed to be easily extendible!
-To create a new function, all you have to do is to add it to the [functions](https://github.com/keephq/keep/blob/main/keep/functions/__init__.py) directory `__init__.py` file. diff --git a/docs/workflows/getting-started.mdx b/docs/workflows/getting-started.mdx deleted file mode 100644 index a29f002f27..0000000000 --- a/docs/workflows/getting-started.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "Getting started" -sidebarTitle: "Getting started" ---- - -# Docker-compose -The easiest way to start keep is by running it on docker-compose: -docker-compose up -`` - -# Kubernetes -Keep can be installed via Helm Chart using the following command: -``` -helm repo add keephq https://keephq.github.io/helm-charts -helm pull keephq/keep -helm install keep keephq/keep -``` - -Notice for it to work locally, you'll need this port forwarding: -`kubectl port-forward svc/keep-frontend 3000:3000` -`kubectl port-forward svc/keep-backend 8080:8080` - -For specifics, see https://github.com/keephq/keep/blob/main/chart/keep/README.md - -# Development -You could use `docker-compose.dev.yaml` to start Keep in a development mode (see Dockerfile.dev.ui / Dockerfile.dev.api) diff --git a/docs/workflows/overview.mdx b/docs/workflows/overview.mdx index fcf66c5c38..a5f0220548 100644 --- a/docs/workflows/overview.mdx +++ b/docs/workflows/overview.mdx @@ -2,45 +2,106 @@ title: "Overview" --- -Need any help with creating a Workflow? Feel free to submit an issue or join our Slack and we will help with that. -A Workflow in Keep is a YAML-based configuration file designed to manage, automate, and enrich alerts. Once uploaded to Keep, the workflow can run based on three different types of triggers: manual, alert, or interval. In this document, we'll look into each of these components in detail. + -In this section we will review the Workflow components. +You can see plenty of fully working examples at our [GitHub repo](https://github.com/keephq/keep/blob/main/examples/workflows/). 
-## Triggers -When you run alert with the CLI using `keep run`, the CLI run the alert regardless of the triggers. -A trigger is an event that starts the workflow. It could be a manual trigger, an alert, or an interval depending on your use case. -Keep support three types of triggers: -### Manual trigger -``` -# run manually -triggers: - - type: manual -``` + -### Alert trigger -``` -# run every time alert from cloudwatch triggered -triggers: - - type: alert - filters: - - key: source - value: cloudwatch -``` +Keep Workflow Engine is designed to streamline and automate operational tasks by integrating triggers, steps, actions, and conditions. This documentation provides an overview of the core concepts used to define and execute workflows effectively. -### Interval trigger -``` -# run every 10 seconds -triggers: - - type: interval - value: 10 + +### General Structure + + +Each workflow is composed of: +1. **metadata** - id, description +2. **triggers** - when does this workflow run? +3. **steps/actions** - what should this workflow do? + +The general structure of a workflow is: + +```yaml +workflow: + id: aks-example + description: aks-example + triggers: + # list of triggers + - type: manual + steps: + # list of steps + - name: some-step + provider: + type: some-provider-type + config: "{{ providers.provider_id }}" + with: + # provider configuration + - ... + actions: + - name: some-action + provider: + type: some-provider-type + with: + # provider configuration + - ... ``` -## Steps -Steps are optional and define a sequence of actions that fetch or compute data. They are used to add data to the workflow, which can be used in other steps or actions. 
+Let's dive into building workflows: +- [Triggers](#triggers) +- [Steps And Actions](#steps-and-actions) +- [Conditions](#conditions) +- [Functions](#functions) +- [Context](#context) +- [Providers](#providers) +- [Variables](#variables) +- [Foreach Loops](#foreach-loops) +- [Alert Enrichment](#alert-enrichment) + + +### Triggers + +Define how a workflow starts, such as manually, on a schedule, or in response to alerts with optional filters for specific conditions. + +[See syntax](/workflows/syntax/triggers) + +### Steps And Actions + +Represent sequential operations, like querying data or running scripts, using configurable providers. + +[See syntax](/workflows/syntax/steps-and-actions) + +### Conditions + +Allow decision-making in actions based on thresholds, assertions, or previous step results. + +[See syntax](/workflows/syntax/conditions) + +### Functions + +Built-in helpers like datetime_compare or is_business_hours simplify complex operations. + +[See syntax](/workflows/syntax/functions) + +### Context + +Enables access to and reuse of outputs from earlier steps within actions or conditions. + +[See syntax](/workflows/syntax/context) + +### Providers + +External systems or services (e.g., Slack, Datadog, ServiceNow) integrated into workflows through a standard configuration interface. + +[See syntax](/workflows/syntax/providers) + +### Foreach Loops + +Iterate over a list of results from a step to perform repeated actions for each item. + +[See syntax](/workflows/syntax/foreach) + +### Alert Enrichment -## Actions -An action defines what to do when a workflow is triggered. Actions usually rely on providers for executing specific tasks, like sending a Slack message. +Add context to alerts, like customer details or ticket metadata, using enrichment mechanisms in steps or actions. -## Conditions -A condition sets the rules under which an action should be performed. For example, you can set a condition to only trigger an action if certain criteria are met. 
+[See syntax](/workflows/syntax/enrichment) diff --git a/docs/workflows/state.mdx b/docs/workflows/state.mdx deleted file mode 100644 index 4b4a0c09ab..0000000000 --- a/docs/workflows/state.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "State" ---- - -## Intro -Keep State Manager is currently used for: -1. Throttling -2. Track alerts over time -3. Previous runs context - -State is currently being saved as a JSON file under `./state/keepstate.json`, a path that can be overriden by setting the `KEEP_STATE_FILE` environment variable. - -## Example -One of the usages for Keep's state mechanism is throttling, see [One Until Resolved](../025_throttles/02-one-until-resolved.md) Keep handles it for you behind the scenes so you can use it without doing any further modifications. - -## Serverless -If you are running Keep on production, you should host the `keepstate.json` file on persistance storage and mount it to your serverless environment. Feel free to create an issue if you need solution for your preferred deployment architecture. - -## Keep state structure -An example for a simple state file: -``` -{ - "service-is-up": [ - { - "alert_status": "resolved", - "alert_context": { - "alert_id": "service-is-up", - "alert_owners": [], - "alert_tags": [], - "alert_steps_context": { - "step1": { - "conditions": {}, - "results": {} - } - } - } - } - ] -} -``` - -### Roadmap - -Keep's roadmap around state (great first issues): -- Saving state in a database. -- Hosting state in buckets (AWS, GCP and Azure -> read/write). -- Enriching state with more context so throttling mechanism would be flexer. 
diff --git a/docs/workflows/syntax/basic-syntax.mdx b/docs/workflows/syntax/basic-syntax.mdx deleted file mode 100644 index faad12ab4d..0000000000 --- a/docs/workflows/syntax/basic-syntax.mdx +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: "Basic Syntax" -description: "At Keep, we view alerts as workflows, which consist of a series of steps executed in sequence, each with its own specific input and output. To keep our approach simple, Keep's syntax is designed to closely resemble the syntax used in GitHub Actions. We believe that GitHub Actions has a well-established syntax, and there is no need to reinvent the wheel." ---- -## Full Example -```yaml title=examples/raw_sql_query_datetime.yml -# Notify if a result queried from the DB is above a certain thershold. -workflow: - id: raw-sql-query - description: Monitor that time difference is no more than 1 hour - steps: - - name: get-max-datetime - provider: - type: mysql - config: "{{ providers.mysql-prod }}" - with: - # Get max(datetime) from the random table - query: "SELECT MAX(datetime) FROM demo_table LIMIT 1" - actions: - - name: trigger-slack - condition: - - name: threshold-condition - type: threshold - # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours - # utcnow() returns the local machine datetime in UTC - # to_utc() converts a datetime to UTC - value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}")) - compare_to: 1 # hours - compare_type: gt # greater than - provider: - type: slack - config: " {{ providers.slack-demo }} " - with: - message: "DB datetime value ({{ actions.trigger-slack.conditions.threshold.0.compare_value }}) is greater than 1! 🚨" -``` - -## Breakdown 🔨 - -### Workflow -```yaml -workflow: - id: raw-sql-query - description: Monitor that time difference is no more than 1 hour - steps: - - - actions: - - -``` - -`Workflow` is built of: -- Metadata (id, description. 
owners and tags will be added soon) -- `steps` - list of steps -- `actions` - list of actions -- `on-failure` - a conditionless action used in case of an alert failure - -### Provider -```yaml -provider: - type: mysql - config: "{{ providers.mysql-prod }}" - with: - query: "SELECT MAX(datetime) FROM demo_table LIMIT 1" - on-failure: - retry: - count: 4 - interval: 10 -``` -`Provider` is built of: -- `type` - the type of the provider ([see supported providers](/workflows/providers/)) -- `config` - the provider configuration. you can either supply it explicitly or using `"{{ providers.mysql-prod }}"` -- `with` - all type-specific provider configuration. for example, for `mysql` we will provide the SQL query. -- `on-failure` - handling the error when provider execution fails, it is built of: - - `retry` - specifies the retry parameters which include: - - `count`: maximum number of retries. - - `interval`: duration in seconds between each retry. - -### Condition -```yaml -- name: threshold-condition - type: threshold - value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}")) - compare_to: 1 - compare_type: gt -``` -`Condition` is built of: -- `name` - a unique identifier to the condition -- `type` - the type of the condition -- `value` - the value that will be supplied to the condition during the alert execution -- `compare_to` - whats `value` will be compared against -- `compare_type` - all type-specific condition configuration - -### Steps/Actions -```yaml -steps/actions: - - name: trigger-slack - condition: - - name: threshold-condition - type: threshold - value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}")) - compare_to: 1 - compare_type: gt - provider: - type: slack - config: " {{ providers.slack-demo }} " - with: - message: "DB datetime value ({{ actions.trigger-slack.conditions.threshold.0.compare_value }}) is greater than 1! 
🚨" -``` - -`Step/Action` is built of: -- `name` - the name of the action. -- `condition` - a list of conditions that -- `provider` - the provider that will trigger the action. -- `throttle` - you can [throttle](../025_throttles/01-what-is-throttle.md) the action. -- `if` - action can be limited to when certain [conditions](../023_conditions/01-what-is-a-condition.md) are met. -- `foreach` - when `foreach` block supplied, Keep will evaluate it as a list, and evaluates the `action` for every item in the list. - -The `provider` configuration is already covered in [Providers](syntax#provider) - -### On-failure -```yaml -on-failure: - # Just need a provider we can use to send the failure reason - provider: - type: slack - config: " {{ providers.slack-demo }} " -``` - -On-failure is actually a condtionless `Action` used to notify in case the alert failed with an exception. -The provider is passed a `message` (string) to it's `notify` function. diff --git a/docs/workflows/syntax/conditions.mdx b/docs/workflows/syntax/conditions.mdx new file mode 100644 index 0000000000..688b13c74a --- /dev/null +++ b/docs/workflows/syntax/conditions.mdx @@ -0,0 +1,128 @@ +--- +title: "Conditions" +--- + +# Conditions + +Attach a condition to any step or action to decide at runtime whether it should run. A condition is a mustache expression that can reference outputs from earlier steps, workflow variables, or any other data in the execution context. + +Using conditions, you can introduce decision-making into workflows by asserting values, thresholds, or specific states. + +### Simple `if` condition + +```yaml +actions: + - name: notify-slack + if: "{{ alert.cpu_load }} == '70'" + provider: + type: slack + config: "{{ providers.slack }}" + with: + message: "The CPU load exceeded the threshold!" +``` + + + **Values of variables will be quoted when evaluated**. For example, if + `alert.cpu_load` is `70`, it will resolve to `'70'` (number quoted with single + quotes). 
+ + +### Using results of other steps in condition + +```yaml +workflow: + id: query-and-alert + description: "Query a database and notify only if a threshold is met" + steps: + - name: get-disk-usage + provider: + type: mysql + config: "{{ providers.mysql-prod }}" + with: + query: "SELECT disk_usage FROM metrics WHERE server = 'db1'" + single_row: true + + actions: + - name: notify-slack + if: "{{ steps.get-disk-usage.results.disk_usage }} > 90" + provider: + type: slack + config: "{{ providers.slack }}" + with: + message: "Disk usage is critical: {{ steps.get-disk-usage.results.disk_usage }}%" +``` + +### Complex logic + +```yaml +actions: + - name: create-incident + if: "{{ steps.get-alert.results.severity }} == 'critical' and {{ steps.get-alert.results.source }} == 'datadog'" + provider: + type: servicenow + config: "{{ providers.servicenow }}" + with: + table_name: INCIDENT + payload: + short_description: "Critical Datadog alert received" +``` + +### Condition with foreach + +```yaml +actions: + - name: process-pods + foreach: "{{ steps.get-pods.results }}" + if: "{{ foreach.value.status.phase }} == 'Failed'" + provider: + type: slack + with: + message: "Pod {{ foreach.value.metadata.name }} has failed!" +``` + +## Condition with constants + +```yaml +consts: + max_load: 70 +actions: + - name: process-pods + if: "{{ alert.cpu_load }} > {{ consts.max_load }}" + provider: + type: slack + with: + message: "Pod {{ foreach.value.metadata.name }} has failed!" +``` + +--- + +## Explicit condition blocks (deprecated) + + + Explicit condition blocks are deprecated and will be discontinued. Use the + `if` syntax instead. + + +### assert (deprecated) + +Checks whether a specific assertion is true. + +```yaml +condition: + - name: assert-condition + type: assert + assert: "{{ steps.get-data.results.value }} == 'expected'" +``` + +### threshold (deprecated) + +Compares a value to a threshold using operators like `>` (gt) and `<` (lt), defaults to `>` (gt). 
+ +```yaml +condition: + - name: threshold-condition + type: threshold + value: "{{ steps.get-data.results.value }}" + compare_to: 100 + compare_type: gt +``` diff --git a/docs/workflows/syntax/context-syntax.mdx b/docs/workflows/syntax/context-syntax.mdx deleted file mode 100644 index ad8749841f..0000000000 --- a/docs/workflows/syntax/context-syntax.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "Working with context" -sidebarTitle: "Content Syntax" -description: "Keep uses [Mustache](https://mustache.github.io/) syntax to inject context at runtime, supporting functions, dictionaries, lists, and nested access." ---- - -Here are some examples: - -- `{{ steps.step-id.results }}` - Result of step-id -- `keep.len({{ steps.step-id.results }})` - Number of results from step-id -- `{{ steps.this.results[0] }}` - First result of this step -- `keep.first({{ steps.this.results }})` - First result (equivalent to the previous example) -- `{{ steps.step-id.results[0][0] }}` - First item of the first result - -If you have suggestions/improvments/bugs for Keep's syntax, please [open feature request](https://github.com/keephq/keep/issues/new?assignees=&labels=&template=feature_request.md&title=) and get eternal glory. - -### Special context - -Keep provides two special context containers - `providers` and `steps` - -### Providers - -Provider configuration typically look like: - -```yaml -provider: - type: mysql - config: "{{ providers.mysql-prod }}" - with: - # Get max(datetime) from the random table - query: "SELECT MAX(datetime) FROM demo_table LIMIT 1" -``` - -Here, `{{ providers.mysql-prod }}` is dynamically translated at runtime from the providers.yaml file. - -### Steps - -The output of steps can be accessed from anywhere in the YAML using `{{ steps.step-name.results }}`. This output can be used in conditions, actions, or any other place. - -### Functions - -Keep's syntax allow to use functions on context blocks. 
For example, `keep.len({{ steps.step-name.results }})` will return the number of results of `step-name` step. - -- [See supported functions](/core/functions/what-is-a-function) -- [Create new functions](core/functions/what-is-a-function#how-to-create-a-new-function) - -Under the hood, Keep uses Python's `ast` module to parse these expressions and evaluate them as best as possible. diff --git a/docs/workflows/syntax/context.mdx b/docs/workflows/syntax/context.mdx new file mode 100644 index 0000000000..b18ca676cb --- /dev/null +++ b/docs/workflows/syntax/context.mdx @@ -0,0 +1,147 @@ +--- +title: "Context" +--- + +The **Context** in Keep workflows allows you to reference and utilize data dynamically across different parts of your workflow. Context variables give you access to runtime data such as alert details, results from previous steps or actions, and constants defined in your workflow. + +This capability makes workflows flexible, reusable, and able to handle complex scenarios dynamically. + +--- + +## Accessing Context + +Context variables can be accessed using curly braces (`{{ }}`). You can use these variables directly in triggers, steps, and actions. The context includes: + +1. **Alert Data**: Access data from the alert triggering the workflow. +2. **Incident Data**: If the workflow is incident-based, you can access the incident's attributes. +3. **Steps and Actions Results**: Retrieve data produced by previous steps or actions using their unique IDs. 
+ +### Alert Data + +You can access attributes of the alert anywhere in the workflow: + +```yaml +message: "Alert triggered: {{ alert.name }} - Severity: {{ alert.severity }}" +``` + +### Incident Data + +For incident workflows, access incident-related context: + +```yaml +if: "{{ incident.current_tier == 1 }}" +``` + +### Steps Results + +Access results from previous steps: + +```yaml +message: "Query results: {{ steps.get-max-datetime.results }}" +``` + +### Action Results + +Retrieve data from completed actions: + +```yaml +if: "{{ actions.trigger-email.results.success }}" +``` + +### Constants + +Define reusable values in the workflow and access them: + +```yaml +consts: + alert_message: "Critical system alert!" + escalation_policy: "tier-1" + slack_channels: + sre_team: CH00001 + payments_team: CH00002 +actions: + - name: notify-slack + if: "{{alert.source}} == 'datadog'" + provider: + type: slack + config: "{{ providers.slack }}" + with: + channel: "{{ consts.slack_channels.sre_team }}" + message: "{{ consts.alert_message }}" +``` + +## Using Context in Loops + +When iterating over data in a `foreach` loop, the context provides `foreach.value` for the current iteration. + +For example: + +```yaml +steps: + - name: get-alerts + provider: + type: keep + with: + query: "status == 'firing'" + +actions: + - name: notify-on-alerts + foreach: "{{ steps.get-alerts.results }}" + provider: + type: slack + with: + message: "Alert: {{ foreach.value.name }} is firing!" 
+``` + +--- + +## Examples of Context Usage + +### Dynamic Action Execution + +Using context to trigger actions conditionally: + +```yaml +actions: + - name: escalate-alert + if: "{{ alert.severity == 'critical' }}" + provider: + type: slack + with: + message: "Critical alert: {{ alert.name }}" +``` + +### Enriching Alerts + +You can use results from a step to enrich an alert + +```yaml +steps: + - name: fetch-customer-details + provider: + type: mysql + with: + query: "SELECT * FROM customers WHERE id = '{{ alert.customer_id }}'" + single_row: true + +actions: + - name: enrich-alert + provider: + type: mock + with: + enrich_alert: + - key: customer_name + value: "{{ steps.fetch-customer-details.results.name }}" +``` + +### Conditional Logic Based on Step Results + +```yaml +actions: + - name: trigger-slack + if: "{{ steps.get-pods.results.0.status.phase == 'Running' }}" + provider: + type: slack + with: + message: "Pod is running: {{ steps.get-pods.results.0.metadata.name }}" +``` diff --git a/docs/workflows/syntax/enrichment.mdx b/docs/workflows/syntax/enrichment.mdx new file mode 100644 index 0000000000..f4e84a983a --- /dev/null +++ b/docs/workflows/syntax/enrichment.mdx @@ -0,0 +1,152 @@ +--- +title: "Enrichment" +--- + +Keep workflows support **enrichment**, a powerful feature that allows you to enhance alerts with additional data, making them more actionable and meaningful. Enrichments add custom fields or modify existing ones in an alert directly from your workflow. + +--- + +## Why Enrich Alerts? + +- **Provide Context:** Add critical information, such as related customer data or ticket IDs. +- **Enable Automation:** Use enriched fields in subsequent actions for dynamic processing. +- **Improve Visibility:** Surface essential metadata for better decision-making. + +--- + +## How to Enrich Alerts + +### Using the `enrich_alert` Directive + +The `enrich_alert` directive is used in actions to add or update fields in the alert. 
You specify a list of key-value pairs where: +- `key` is the field name to add or update. +- `value` is the data to assign to the field. It can be a static value or dynamically derived from steps or other parts of the workflow. +- `disposable` is an optional attribute that determines whether the enrichment is temporary and should be discarded when a new alert is received. If disposable is set to True, the enrichment is added to disposable_enrichments and marked with dispose_on_new_alert=True. + +### Example Workflow with Enrichment + +```yaml +workflow: + id: enrich-alert-example + description: Demonstrates enriching alerts + triggers: + - type: alert + steps: + - name: get-customer-details + provider: + type: mysql + config: "{{ providers.mysql-prod }}" + with: + query: "SELECT * FROM customers WHERE customer_id = '{{ alert.customer_id }}'" + single_row: true + actions: + - name: enrich-alert-with-customer-data + provider: + type: mock + with: + enrich_alert: + - key: customer_name + value: "{{ steps.get-customer-details.results.name }}" + - key: customer_tier + value: "{{ steps.get-customer-details.results.tier }}" +``` + +In this example: +- The `get-customer-details` step fetches customer data based on the alert. +- The `enrich_alert` directive adds `customer_name` and `customer_tier` to the alert. 
+ +--- + + +## Enrichment Syntax + +### Key-Value Pairs +Each enrichment is defined as a key-value pair: + +```yaml +enrich_alert: + - key: field_name + value: field_value + disposable: true +``` + +- **Static Values:** Use static strings or numbers for straightforward enrichments: +```yaml +- key: alert_source + value: "Monitoring System" +``` + +- **Dynamic Values:** Use values derived from steps, actions, or the alert itself: +```yaml +- key: severity_level + value: "{{ alert.severity }}" +``` + +### Conditional Enrichment + +You can combine enrichment with conditions to enrich alerts dynamically: + +```yaml +actions: + - name: enrich-critical-alert + if: "{{ alert.severity == 'critical' }}" + provider: + type: mock + with: + enrich_alert: + - key: priority + value: high +``` + +## Advanced Use Cases + + +### Enrich Alerts with Results from Actions +Enrichments can use results from actions, allowing dynamic updates based on previous steps: +```yaml +enrich_alert: + - key: ticket_id + value: "{{ actions.create-ticket.results.ticket_id }}" + - key: ticket_url + value: "{{ actions.create-ticket.results.ticket_url }}" + +``` + +## Enrichment Workflow Example + +This example demonstrates how to enrich an alert with ticket details from ServiceNow: + +```yaml +workflow: + id: servicenow-ticket-enrichment + triggers: + - type: alert + steps: + - name: fetch-alert-details + provider: + type: keep + with: + filter: "alert_id == '{{ alert.id }}'" + actions: + - name: create-servicenow-ticket + provider: + type: servicenow + config: "{{ providers.servicenow }}" + with: + table_name: INCIDENT + payload: + short_description: "Alert: {{ alert.name }}" + description: "{{ alert.description }}" + enrich_alert: + - key: ticket_id + value: "{{ results.sys_id }}" + - key: ticket_url + value: "{{ results.link }}" + +``` + +## Troubleshooting Enrichment + + +### Enrichment without an Alert/Incident +If there is no alert/incident present in the trigger (for example interval trigger or 
manual call in workflow page), the enrichment rule would not have an alert/incident to apply to. The enrichment process typically requires an alert/incident to be present to apply the specified enrichments. Without an alert/incident, the enrichment rule would not execute as intended. A workaround is to use a foreach directive and pass it an object containing the "fingerprint" variable. diff --git a/docs/workflows/syntax/foreach-syntax.mdx b/docs/workflows/syntax/foreach-syntax.mdx deleted file mode 100644 index 310f7ceb06..0000000000 --- a/docs/workflows/syntax/foreach-syntax.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: "Foreach" -sidebarTitle: "Foreach Syntax" -description: "Foreach syntax add the flexability of running action per result instead of only once on all results." ---- - -## Usage -There are two main operations mode for `foreach`: -1. In the [steps](#steps-section) section. -2. In the [action](#actions-section) section. - -When you enter a `foreach` context, you can use `{{ foreach.value }}` to use the specific value. -Let's review how to use `foreach`. - -### Steps section -Using `foreach` in `steps`, let you run a step for each result of a previous step. -In other words: -1. Run some step. -2. For each result of the previous step, run another step. - -For example, in this alert, we: -1. Get all node id's (`get-node-ids` step). -2. For each node, get the data for result id (`get-filesystems-by-node-id` step). 
- -```yaml - steps: - # Get all nodes ids - - name: get-node-ids - provider: - type: postgres - config: "{{ providers.postgres-server }}" - with: - query: "select distinct(node_id) from filesystem;" - # For each node id, get the filesystem status and find filesystems in node that are not balanced - - name: get-filesystems-by-node-id - foreach: "{{ steps.get-node-ids.results }}" - provider: - type: postgres - config: "{{ providers.postgres-server }}" - with: - query: "select * from filesystem where node_id = '{{ foreach.value[0] }}';" -``` - - -In this case, `foreach.value` contains a row from the database, and `foreach.value[0]` is the first column of this row. - -### Actions section -Now, let's see how `foreach` can be used in the `actions` section. - -In the following example, we are using `foreach` twice: -1. `foreach: "{{ steps.get-filesystems-by-node-id.results }}"` - iterate over the results of `get-filesystems-by-node-id` results -2. `{{#foreach.stddev}}` - using mustache syntax, we iterate over `foreach.stddev` results. - -#### Wait, but what's `foreach.stddev`? -> **tldr**: conditions can extend `foreach` with other attributes, to support more context. - -Due to the fact that conditions work on `foreach.value`, we can extend `foreach` with other attributes. -For example, the `threshold` condition extends `foreach` with `level`, so you can use `foreach.level`, and `stddev` condition extends `foreach` with `stddev` attribute. - - -```yaml -actions: - - name: push-alert-to-postgres - # Run on get-filesystems-by-node-id results. 
- # Notice each result is a list of filesystems in node - foreach: "{{ steps.get-filesystems-by-node-id.results }}" - # Alert on nodes that have filesystems that away from the standard deviation - condition: - - name: stddev-condition - type: stddev - # foreach.value contain a list of rows from the database - value: "{{ foreach.value }}" - pivot_column: 8 # 8th column is the filesystem usage percentage - compare_to: 1 - - provider: - type: postgres - config: "{{ providers.postgres-server }}" - with: - query: > - INSERT INTO alert (alert_level, alert_message) - VALUES ('major', 'The node {{ foreach.value[0][4] }} has filesystems that are not balanced: - {{#foreach.stddev}} - - Filesystem {{ value[0] }} is {{stddev}} away from the standard deviation - {{/foreach.stddev}}') -``` diff --git a/docs/workflows/syntax/foreach.mdx b/docs/workflows/syntax/foreach.mdx new file mode 100644 index 0000000000..6da31d348d --- /dev/null +++ b/docs/workflows/syntax/foreach.mdx @@ -0,0 +1,90 @@ +--- +title: "Foreach" +--- + +The `foreach` directive in Keep workflows allows you to iterate over a list of items and perform actions for each item. This is particularly useful for processing multiple results returned by a step or performing actions on a collection of entities. + +## Key Features + +- **Dynamic Iteration:** Iterate over any list or array returned by a step or defined in the workflow. +- **Scoped Variables:** Each iteration exposes the current item under the `foreach` variable, allowing you to access its properties directly. +- **Action Chaining:** Multiple actions can use `foreach` to work sequentially on the same list of items. + +--- + +## Defining a `foreach` + +To use `foreach`, include it as part of an action. The value of `foreach` should be a reference to the list you want to iterate over. 
+### Example Workflow with `foreach` + +```yaml +workflow: + id: foreach-example + description: Demonstrates the use of foreach + triggers: + - type: manual + steps: + - name: get-pods + provider: + type: gke + config: "{{ providers.gke }}" + with: + command_type: get_pods + actions: + - name: echo-pod-status + foreach: "{{ steps.get-pods.results }}" + provider: + type: console + with: + message: "Pod name: {{ foreach.value.metadata.name }} || Namespace: {{ foreach.value.metadata.namespace }} || Status: {{ foreach.value.status.phase }}" +``` + +In this example: + +- The `get-pods` step retrieves a list of Kubernetes pods. +- The `foreach` iterates over the `results` returned by the `get-pods` step. +- For each pod, it prints its `name`, `namespace`, and `status`. + +--- + + +## Using `foreach` Variables + +The `foreach` variable provides scoped access to the current item in the iteration. + +### Example of Scoped Variables + +```yaml +actions: + - name: notify-pod-status + foreach: "{{ steps.get-pods.results }}" + provider: + type: slack + with: + message: | + Pod Name: {{ foreach.value.metadata.name }} + Namespace: {{ foreach.value.metadata.namespace }} + Status: {{ foreach.value.status.phase }} + +``` + +In this case: +- `{{ foreach.value }}` refers to the current item in the list. +- Access properties like `metadata.name`, `metadata.namespace`, and `status.phase` dynamically. + + +### Using Conditions with `foreach` + +You can combine `foreach` with `if` conditions to filter or act selectively. 
+ +```yaml +actions: + - name: alert-critical-pods + foreach: "{{ steps.get-pods.results }}" + if: "{{ foreach.value.status.phase == 'Failed' }}" + provider: + type: slack + with: + message: "Critical pod failure detected: {{ foreach.value.metadata.name }}" +``` diff --git a/docs/workflows/syntax/functions.mdx b/docs/workflows/syntax/functions.mdx new file mode 100644 index 0000000000..37e58d4d45 --- /dev/null +++ b/docs/workflows/syntax/functions.mdx @@ -0,0 +1,760 @@ +--- +title: "Functions" +--- + +The **Functions** in Keep Workflow Engine are utilities that can be used to manipulate data, check conditions, or perform transformations within workflows. This document provides a brief overview and usage examples for each available function. + +--- + +## Mathematical Functions + +### `add` + +**Description:** Adds all provided numbers together. All arguments are converted to integers. + +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.add(1, 2, 3) # Output: 6 + message2: keep.add(10, 20, 30) # Output: 60 +``` + +--- + +### `sub` + +**Description:** Subtracts all subsequent numbers from the first number. All arguments are converted to integers. + +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.sub(10, 2, 3) # Output: 5 + message2: keep.sub(100, 20, 30) # Output: 50 +``` + +--- + +### `mul` + +**Description:** Multiplies all provided numbers together. All arguments are converted to integers. + +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.mul(2, 3, 4) # Output: 24 + message2: keep.mul(5, 6, 7) # Output: 210 +``` + +--- + +### `div` + +**Description:** Divides the first number by all subsequent numbers. All arguments are converted to integers. Returns an integer if the division result is whole, otherwise returns a floating-point number. 
+ +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.div(10, 2) # Output: 5 + message2: keep.div(10, 3) # Output: 3.3333333333333335 + message3: keep.div(100, 2, 5) # Output: 10 +``` + +--- + +### `mod` + +**Description:** Calculates the remainder of dividing the first number by all subsequent numbers sequentially. All arguments are converted to integers. + +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.mod(10, 3) # Output: 1 + message2: keep.mod(100, 30, 7) # Output: 2 +``` + +--- + +### `exp` + +**Description:** Raises the first number to the power equal to the product of all subsequent numbers. All arguments are converted to integers. + +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.exp(2, 3) # Output: 8 + message2: keep.exp(2, 3, 2) # Output: 64 +``` + +--- + +### `fdiv` + +**Description:** Performs integer division of the first number by all subsequent numbers sequentially. All arguments are converted to integers. + +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.fdiv(10, 3) # Output: 3 + message2: keep.fdiv(100, 3, 2) # Output: 16 +``` + +--- + +### `eq` + +**Description:** Checks if two values are equal. + +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.eq(5, 5) # Output: true + message2: keep.eq("hello", "world") # Output: false + message3: keep.eq([1, 2, 3], [1, 2, 3]) # Output: true +``` + +--- + +## String Functions + +### `uppercase` + +**Description:** Converts a string to uppercase. + +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: "keep.uppercase('hello world')" # Output: "HELLO WORLD" +``` + +--- + +### `lowercase` + +**Description:** Converts a string to lowercase. 
+**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: "keep.lowercase('HELLO WORLD')" # Output: "hello world" +``` + +--- + +### `capitalize` + +**Description:** Capitalizes the first character of a string. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.capitalize("hello world") # Output: "Hello world" +``` + +--- + +### `title` + +**Description:** Converts a string to title case (capitalizes each word). +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.title("hello world") # Output: "Hello World" +``` + +--- + +### `split` + +**Description:** Splits a string into a list using a delimiter. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: "keep.split('a,b,c', ',')" # Output: ["a", "b", "c"] +``` + +--- + +### `strip` + +**Description:** Removes leading and trailing whitespace from a string. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.strip(" hello world ") # Output: "hello world" +``` + +--- + +### `replace` + +**Description:** Replaces occurrences of a substring with another string. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.replace("hello world", "world", "Keep") # Output: "hello Keep" +``` + +--- + +### `remove_newlines` + +**Description:** Removes all newline and tab characters from a string. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.remove_newlines("hello\nworld\t!") # Output: "helloworld!" +``` + +--- + +### `encode` + +**Description:** URL-encodes a string. 
+**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.encode("hello world") # Output: "hello%20world" +``` + +--- + +### `slice` + +**Description:** Extracts a portion of a string based on start and end indices. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.slice("hello world", 0, 5) # Output: "hello" +``` + +--- + +## List and Dictionary Functions + +### `first` + +**Description:** Retrieves the first element from a list. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.first([1, 2, 3]) # Output: 1 +``` + +--- + +### `last` + +**Description:** Retrieves the last element from a list. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.last([1, 2, 3]) # Output: 3 +``` + +--- + +### `index` + +**Description:** Retrieves an element at a specific index from a list. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.index(["a", "b", "c"], 1) # Output: "b" +``` + +--- + +### `join` + +**Description:** Joins a list of elements into a string using a delimiter. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.join(["a", "b", "c"], ",") # Output: "a,b,c" +``` + +--- + +### `len` + +**Description:** Returns the length of a list. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.len([1, 2, 3]) # Output: 3 +``` + +--- + +### `dict_to_key_value_list` + +**Description:** Converts a dictionary into a list of key-value pairs. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.dict_to_key_value_list({"a": 1, "b": 2}) # Output: ["a:1", "b:2"] +``` + +--- + +### `dict_pop` + +**Description:** Removes specified keys from a dictionary. 
+**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.dict_pop({"a": 1, "b": 2, "c": 3}, "a", "b") # Output: {"c": 3} +``` + +--- + +### `dict_pop_prefix` + +**Description:** Removes all keys that start with a specified prefix from a dictionary. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.dict_pop_prefix({"a_1": 1, "a_2": 2, "b_1": 3}, "a_") # Output: {"b_1": 3} +``` + +--- + +### `dict_filter_by_prefix` + +**Description:** Returns only the dictionary entries whose keys start with a specified prefix. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.dict_filter_by_prefix({"a_1": 1, "a_2": 2, "b_1": 3}, "a_") # Output: {"a_1": 1, "a_2": 2} +``` + +--- + +### `dictget` + +**Description:** Gets a value from a dictionary with a default fallback. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.dictget({"a": 1, "b": 2}, "c", "default") # Output: "default" +``` + +--- + +## Date and Time Functions + +### `from_timestamp` + +**Description:** Converts unix timestamp int, float or string to datetime object, with optional timezone option. + +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: console + with: + message: keep.from_timestamp(1717244449.0) # will print "2024-06-01 12:20:49+00:00" + # or with timezone + # message: keep.from_timestamp(1717244449.0, "Europe/Berlin") # will print "2024-06-01 14:20:49+02:00" +``` + +### `utcnow` + +**Description:** Returns the current UTC datetime. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.utcnow() +``` + +--- + +### `utcnowtimestamp` + +**Description:** Returns the current UTC datetime as a Unix timestamp (seconds since epoch). 
+**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.utcnowtimestamp() # Output: 1704067200 +``` + +--- + +### `utcnowiso` + +**Description:** Returns the current UTC datetime in ISO format. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.utcnowiso() +``` + +--- + +### `to_utc` + +**Description:** Converts a datetime string or object to UTC. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.to_utc("2024-01-01T00:00:00") +``` + +--- + +### `to_timestamp` + +**Description:** Converts a datetime object or string into a Unix timestamp. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.to_timestamp("2024-01-01T00:00:00") +``` + +--- + +### `datetime_compare` + +**Description:** Compares two datetime objects and returns the difference in hours. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.datetime_compare("2024-01-01T10:00:00", "2024-01-01T00:00:00") # Output: 10.0 +``` + +--- + +### `is_business_hours` + +**Description:** Checks whether a given time falls within business hours. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.is_business_hours( + time_to_check="2024-01-01T14:00:00Z", + start_hour=8, + end_hour=20, + business_days=[0,1,2,3,4], + timezone="America/New_York" + ) +``` + +--- + +## JSON Functions + +### `json_dumps` + +**Description:** Converts a dictionary or string into a formatted JSON string. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.json_dumps({"key": "value"}) +``` + +--- + +### `json_loads` + +**Description:** Parses a JSON string into a dictionary. 
+**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.json_loads('{"key": "value"}') +``` + +--- + +## Utility Functions + +### `get_firing_time` + +**Description:** Calculates the firing duration of an alert in specified time units. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.get_firing_time(alert, "m", tenant_id="tenant-id") # Output: "15.0" +``` + +--- + +### `add_time_to_date` + +**Description:** Adds time to a date string based on specified time units. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.add_time_to_date("2024-01-01", "%Y-%m-%d", "1w 2d") # Output: "2024-01-10" +``` + +--- + +### `timestamp_delta` + +**Description:** Adds or subtracts a time delta to/from a datetime. Use negative values to subtract time. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + # Add 2 hours to the current time + add_hours: keep.timestamp_delta(keep.utcnow(), 2, "hours") + + # Subtract 30 minutes from a specific datetime + subtract_minutes: keep.timestamp_delta("2024-01-01T12:00:00Z", -30, "minutes") # Output: 2024-01-01T11:30:00Z + + # Add 1 week to a datetime + add_week: keep.timestamp_delta("2024-01-01T00:00:00Z", 1, "weeks") # Output: 2024-01-08T00:00:00Z +``` + +--- + +### `is_first_time` + +**Description:** Checks if an alert with a given fingerprint is firing for the first time or first time within a specified period. 
+**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + # Check if this is the first time the alert is firing + first_time: keep.is_first_time(alert.fingerprint, tenant_id="tenant-id") + + # Check if this is the first time the alert is firing in the last 24 hours + first_time_24h: keep.is_first_time(alert.fingerprint, "24h", tenant_id="tenant-id") +``` + +--- + +### `all` + +**Description:** Checks if all elements in an iterable are identical. +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.all([1, 1, 1]) # Output: true +``` + +--- + +### `diff` + +**Description:** Checks if any elements in an iterable are different (opposite of `all`). +**Example:** + +```yaml +steps: + - name: example-step + provider: + type: mock + with: + message: keep.diff([1, 2, 1]) # Output: true +``` + +--- diff --git a/docs/workflows/syntax/permissions.mdx b/docs/workflows/syntax/permissions.mdx new file mode 100644 index 0000000000..17430c1c95 --- /dev/null +++ b/docs/workflows/syntax/permissions.mdx @@ -0,0 +1,99 @@ +--- +title: "Permissions" +--- + +# Permissions + +Permissions in Keep Workflow Engine define **who can execute a workflow manually**. + +They allow you to restrict access to workflows based on user roles or specific email addresses, ensuring that only authorized users can trigger sensitive workflows. + + +Currently, permissions can only be edited directly in the workflow YAML file. The workflow builder UI does not support editing permissions at this time. + + +--- + +## General Structure + +Permissions are defined at the top level of a workflow YAML file using the `permissions` field, which accepts a list of roles and/or email addresses. 
+ +```yaml +workflow: + id: sensitive-workflow + name: Sensitive Workflow + description: "A workflow with restricted access" + permissions: + - admin + - john.doe@example.com + steps: + # workflow steps +``` + +## How Permissions Work + +When a workflow has permissions defined: + +1. **Admin users** can always run the workflow regardless of the permissions list +2. **Non-admin users** can only run the workflow if: + - Their role is explicitly listed in the permissions + - OR their email address is explicitly listed in the permissions +3. If the `permissions` field is empty or not defined, any user with the `write:workflows` permission can run the workflow + +## Supported Role Types + +Keep supports the following role types that can be used in the permissions list: + +- `admin`: Administrator users with full system access +- `noc`: Network Operations Center users with read-only access +- `webhook`: API access for webhook integrations +- `workflowrunner`: Special role for running workflows via API + +## Examples + +### Restricting to Admin Users Only + +```yaml +workflow: + id: critical-infrastructure-workflow + name: Critical Infrastructure Workflow + permissions: + - admin + steps: + # workflow steps +``` + +### Allowing Specific Users + +```yaml +workflow: + id: department-specific-workflow + name: Department Specific Workflow + permissions: + - sarah.smith@example.com + - team.lead@example.com + steps: + # workflow steps +``` + +### Combining Roles and Individual Users + +```yaml +workflow: + id: mixed-permissions-workflow + name: Mixed Permissions Workflow + permissions: + - admin + - noc + - devops.specialist@example.com + steps: + # workflow steps +``` + +## Best Practices + +- Use permissions for workflows that have significant impact on systems or trigger sensitive operations +- Consider using role-based permissions (like `admin` or `noc`) for groups of users with similar responsibilities +- List individual email addresses only for exceptions or when very 
specific access control is needed +- Review workflow permissions regularly as part of security audits +- Document which workflows have restricted permissions in your internal documentation diff --git a/docs/workflows/syntax/providers.mdx b/docs/workflows/syntax/providers.mdx new file mode 100644 index 0000000000..caafd732b4 --- /dev/null +++ b/docs/workflows/syntax/providers.mdx @@ -0,0 +1,79 @@ +--- +title: "Providers" +--- + +Providers are a fundamental part of workflows in Keep. They enable workflows to interact with external systems, fetch data, and perform actions. Each provider is designed to handle specific integrations such as Datadog, Slack, ServiceNow, or custom-built APIs. + +## Key Features of Providers + +- **Extensibility:** Providers can be easily extended to support new systems or custom use cases. + + You can explore and contribute to the existing providers or create your own in the [Keep Providers Code Directory on GitHub](https://github.com/keephq/keep/providers). + + +- **Parameterization:** Parameters under the `with` section are passed directly to the provider. This allows you to configure provider-specific settings for each step or action. + +- **Provisioning:** Providers can be provisioned via CI/CD pipelines or through the Keep UI, providing flexibility for both automated and manual setups. + +--- + +## Defining a Provider + +To define a provider, include its configuration under the `providers` section of your workflow file. Here's an example: + +```yaml +providers: + slack: + description: "Slack provider for sending messages" + authentication: + webhook_url: "{{ env.SLACK_WEBHOOK_URL }}" +``` + +## Using a Provider in a Workflow + +Once a provider is defined, it can be used in workflow steps or actions by specifying its type and configuration. 
+ +For example: + +```yaml +actions: + - name: trigger-slack + provider: + type: slack + config: "{{ providers.slack }}" + with: + channel: "#alerts" + message: "Alert triggered: {{ alert.name }}" + +``` + +- The `config` field links the action to the provider. +- The `with` section includes parameters that are passed to the provider. + +## Examples + +### Fetching Data with a Provider + +```yaml +steps: + - name: get-alerts + provider: + type: datadog + config: "{{ providers.datadog }}" + with: + query: "avg:cpu.usage{*}" + timeframe: "1h" +``` + +### Sending Notifications with a Provider +```yaml +actions: + - name: notify-slack + provider: + type: slack + config: "{{ providers.slack }}" + with: + channel: "#alerts" + message: "Critical alert: {{ alert.name }}" + +``` diff --git a/docs/workflows/syntax/steps-and-actions.mdx b/docs/workflows/syntax/steps-and-actions.mdx new file mode 100644 index 0000000000..4b75e087ff --- /dev/null +++ b/docs/workflows/syntax/steps-and-actions.mdx @@ -0,0 +1,213 @@ +--- +title: "Steps and Actions" +--- + +Steps and actions are the building blocks of workflows in Keep Workflow Engine. While they share a similar structure and syntax, the **difference between steps and actions is mostly semantic**: + +- **Steps**: Focused on querying data or triggering fetch-like operations from providers (e.g., querying databases, fetching logs, or retrieving information). +- **Actions**: Geared toward notifying or triggering outcomes, such as sending notifications, updating tickets, or invoking external services. + +Together, steps and actions allow workflows to both gather the necessary data and act upon it. + +--- + +## General Structure + +Both steps and actions are defined using a similar schema: + +### Steps + +Used for querying or fetching data. + +Step uses the `_query` method of each provider. + +```yaml +steps: + - name: + provider: + type: + config: + with: + +``` + +### Actions + +Used for notifications or triggering effects. 
+ +Action uses the `_notify` method of each provider. + +```yaml + +actions: + - name: + provider: + type: + config: + with: + +``` + + +## Examples + + +### Fetch data from a MySQL database + +```yaml + +steps: + - name: get-user-data + provider: + type: mysql + config: "{{ providers.mysql-prod }}" + with: + query: "SELECT * FROM users WHERE id = 1" + single_row: true +``` + + +### Retrieve logs from Datadog + +```yaml +steps: + - name: get-service-logs + provider: + type: datadog + config: "{{ providers.datadog }}" + with: + query: "service:keep and @error" + timeframe: "1h" +``` + +### Query Kubernetes for running pods + +```yaml + +steps: + - name: get-pods + provider: + type: k8s + config: "{{ providers.k8s-cluster }}" + with: + command_type: get_pods +``` + +### Send an email + +```yaml +actions: + - name: send-email + provider: + type: email + config: "{{ providers.email }}" + with: + to: "user@example.com" + subject: "Account Updated" + body: "Your account details have been updated." +``` + +### Send a Slack Message + +```yaml +actions: + - name: notify-slack + provider: + type: slack + config: "{{ providers.slack-demo }}" + with: + message: "Critical alert received!" + +``` + +### Create a ticket in ServiceNow + +```yaml +actions: + - name: create-servicenow-ticket + provider: + type: servicenow + config: "{{ providers.servicenow }}" + with: + table_name: INCIDENT + payload: + short_description: "New incident created by Keep" + description: "Please investigate the issue." +``` + +## Combining Steps and Actions + +A workflow typically combines steps (for querying data) with actions (for notifications or outcomes). 
+ +Here are a few examples: + +### Query and Notify + +```yaml +workflow: + id: query-and-notify + description: "Query a database and notify via Slack" + steps: + - name: get-user-data + provider: + type: mysql + config: "{{ providers.mysql-prod }}" + with: + query: "SELECT email FROM users WHERE id = 1" + single_row: true + + actions: + - name: send-notification + provider: + type: slack + config: "{{ providers.slack-demo }}" + with: + message: "User email: {{ steps.get-user-data.results.email }}" +``` + +### Alert and Incident Management + +```yaml +workflow: + id: alert-management + description: "Handle alerts and create incidents" + steps: + - name: get-alert-details + provider: + type: datadog + config: "{{ providers.datadog }}" + with: + query: "service:keep and @alert" + timeframe: "1h" + + actions: + - name: create-incident + provider: + type: servicenow + config: "{{ providers.servicenow }}" + with: + table_name: INCIDENT + payload: + short_description: "Alert from Datadog: {{ steps.get-alert-details.results.alert_name }}" + description: "Details: {{ steps.get-alert-details.results.alert_description }}" +``` + +## Error Handling and Retries + +Both steps and actions support error handling to ensure workflows can recover from failures. + + +```yaml + +steps: + - name: fetch-data + provider: + type: http + with: + url: "https://api.example.com/data" + on-failure: + retry: + count: 3 + # Retry every 5 seconds + interval: 5 +``` diff --git a/docs/workflows/syntax/triggers.mdx b/docs/workflows/syntax/triggers.mdx new file mode 100644 index 0000000000..d12a617d82 --- /dev/null +++ b/docs/workflows/syntax/triggers.mdx @@ -0,0 +1,116 @@ +--- +title: "Triggers" +--- + +## Overview + +Triggers in Keep Workflow Engine define **when a workflow is executed**. Triggers are the starting point for workflows and can be configured to respond to a variety of events, conditions, or schedules. 
+ +A workflow can have one or multiple triggers, and these triggers determine the specific circumstances under which the workflow is initiated. Examples include manual invocation, time-based schedules, or event-driven actions like alerts or incident updates. + +Triggers are defined under the `triggers` section of a workflow YAML file. Each trigger has a `type` and optional additional configurations or filters. + +## Supported Trigger Types + +### Manual Trigger + +Used to execute workflows on demand. + +```yaml +triggers: + - type: manual +``` + +### Interval Trigger + +Runs workflows at a regular interval. + +```yaml +triggers: + - type: interval + # Run every 5 seconds + value: 5 +``` + +### Alert Trigger + +Executes a workflow when an alert is received. + +```yaml +triggers: + - type: alert +``` + + + If no filters or CEL expressions are specified, the workflow will be executed + for every alert that comes in. + + +### Filtering Alerts + +There are two ways to filter alerts in Keep: + +#### 1. CEL-based Filtering (Recommended) + +Keep uses [Common Expression Language (CEL)](https://github.com/google/cel-spec/blob/master/doc/langdef.md) for filtering alerts. CEL provides a powerful and flexible way to express conditions using a simple expression language. + +```yaml +triggers: + - type: alert + cel: source.contains("datadog") && severity == "critical" +``` + +Common CEL patterns: + +- String matching: `source.contains("prometheus")` +- Exact matching: `severity == "critical"` +- Multiple conditions: `source.contains("datadog") && severity == "critical"` +- Pattern matching: `name.contains("error") || name.contains("failure")` +- Complex conditions: `(source.contains("datadog") && severity == "critical") || (source.contains("newrelic") && severity == "error")` + +You can test and experiment with CEL expressions using the [CEL Playground](https://playcel.undistro.io/). + +#### 2. 
Legacy Filtering (Deprecated) + +The old filtering mechanism is deprecated but still supported for backward compatibility. It uses a list of key-value pairs with optional regex patterns. + +```yaml +triggers: + - type: alert + filters: + - key: severity + value: critical + - key: source + value: datadog + - key: service + value: r"(payments|ftp)" +``` + +### Incident Trigger + +Runs workflows when an incident is created, updated, or resolved. + +```yaml +triggers: + - type: incident + on: + - create + - update +``` + +### Field Change Trigger + +Executes a workflow when specific fields in an alert change, such as status or severity. + +```yaml +triggers: + - type: alert + only_on_change: + - status +``` + +## Summary + +Triggers are a powerful way to control the execution of workflows, ensuring that they respond appropriately to manual actions, schedules, or events. By leveraging CEL expressions or filters, workflows can be fine-tuned to execute only under specific conditions. + +For more information about CEL expressions, refer to the [CEL Language Definition](https://github.com/google/cel-spec/blob/master/doc/langdef.md) and experiment with expressions in the [CEL Playground](https://playcel.undistro.io/). diff --git a/docs/workflows/throttles/one-until-resolved.mdx b/docs/workflows/throttles/one-until-resolved.mdx deleted file mode 100644 index 6f36874bd5..0000000000 --- a/docs/workflows/throttles/one-until-resolved.mdx +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: "One Until Resolved" -description: "The action will trigger once the alert is resolved." ---- - -For example: - -1. Alert executed and action were triggered as a result -> the alert status is now "Firing". -2. Alert executed again and action should be triggered -> the action will be throttled. -3. Alert executed and no action is required -> the alert status is now "Resolved". -4. 
Alert exectued and action were triggered -> the action is triggered - -## How to use - -Add the following attribute to your action: - -``` -throttle: - type: one_until_resolved -``` - -For example: - -``` -# Database disk space is low (<10%) -alert: - id: service-is-up - description: Check that the service is up - steps: - - name: service-is-up - provider: - type: python - with: - # any external libraries needed - imports: requests - code: requests.get("http://localhost:3000") - actions: - - name: trigger-slack - throttle: - type: one_until_resolved - condition: - - type: assert - assert: "{{ steps.this.results.status_code }} == 200" -``` diff --git a/docs/workflows/throttles/what-is-a-throttle.mdx b/docs/workflows/throttles/what-is-a-throttle.mdx deleted file mode 100644 index f885d72bd0..0000000000 --- a/docs/workflows/throttles/what-is-a-throttle.mdx +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: "What is a Throttle?" -description: "The purpose of throttling is to prevent any action from being triggered too many times, thus generating too many alerts." ---- - -## Throttle strategies - -- [One Until Resolved](/platform/core/throttles/what-is-a-throttle) - -## Implementing new strategy - -To create a new throttle strategy, create a new class that inherits from `base_throttle.py`, and implements `check_throttling`. - -[You can also just submit a new feature request](https://github.com/keephq/keep/issues/new?assignees=&labels=&template=feature_request.md&title=feature:%20new%20throttling%20strategy) and we will get to it ASAP! 
diff --git a/docs/workflows/ui/getting-started.mdx b/docs/workflows/ui/getting-started.mdx deleted file mode 100644 index 6bf28e4eea..0000000000 --- a/docs/workflows/ui/getting-started.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "" -sidebarTitle: "Getting Started" ---- diff --git a/ee/experimental/incident_utils.py b/ee/experimental/incident_utils.py deleted file mode 100644 index 975e64ff2d..0000000000 --- a/ee/experimental/incident_utils.py +++ /dev/null @@ -1,148 +0,0 @@ -import numpy as np -import pandas as pd -import networkx as nx - -from typing import List - -from keep.api.models.db.alert import Alert - - -def mine_incidents(alerts: List[Alert], incident_sliding_window_size: int=6*24*60*60, statistic_sliding_window_size: int=60*60, - jaccard_threshold: float=0.0, fingerprint_threshold: int=1): - """ - Mine incidents from alerts. - """ - - alert_dict = { - 'fingerprint': [alert.fingerprint for alert in alerts], - 'timestamp': [alert.timestamp for alert in alerts], - } - alert_df = pd.DataFrame(alert_dict) - mined_incidents = shape_incidents(alert_df, 'fingerprint', incident_sliding_window_size, statistic_sliding_window_size, - jaccard_threshold, fingerprint_threshold) - - return [ - { - "incident_fingerprint": incident['incident_fingerprint'], - "alerts": [alert for alert in alerts if alert.fingerprint in incident['alert_fingerprints']], - } - for incident in mined_incidents - ] - - -def get_batched_alert_counts(alerts: pd.DataFrame, unique_alert_identifier: str, sliding_window_size: int) -> np.ndarray: - """ - Get the number of alerts in a sliding window. 
- """ - - resampled_alert_counts = alerts.set_index('timestamp').resample( - f'{sliding_window_size//2}s')[unique_alert_identifier].value_counts().unstack(fill_value=0) - rolling_counts = resampled_alert_counts.rolling( - window=f'{sliding_window_size}s', min_periods=1).sum() - alert_counts = rolling_counts.to_numpy() - - return alert_counts - - -def get_batched_alert_occurrences(alerts: pd.DataFrame, unique_alert_identifier: str, sliding_window_size: int) -> np.ndarray: - """ - Get the occurrence of alerts in a sliding window. - """ - - alert_counts = get_batched_alert_counts( - alerts, unique_alert_identifier, sliding_window_size) - alert_occurences = np.where(alert_counts > 0, 1, 0) - - return alert_occurences - - -def get_jaccard_scores(P_a: np.ndarray, P_aa: np.ndarray) -> np.ndarray: - """ - Calculate the Jaccard similarity scores between alerts. - """ - - P_a_matrix = P_a[:, None] + P_a - union_matrix = P_a_matrix - P_aa - - with np.errstate(divide='ignore', invalid='ignore'): - jaccard_matrix = np.where(union_matrix != 0, P_aa / union_matrix, 0) - - np.fill_diagonal(jaccard_matrix, 1) - - return jaccard_matrix - - -def get_alert_jaccard_matrix(alerts: pd.DataFrame, unique_alert_identifier: str, sliding_window_size: int) -> np.ndarray: - """ - Calculate the Jaccard similarity scores between alerts. 
- """ - - alert_occurrences = get_batched_alert_occurrences( - alerts, unique_alert_identifier, sliding_window_size) - alert_probabilities = np.mean(alert_occurrences, axis=0) - joint_alert_occurrences = np.dot(alert_occurrences.T, alert_occurrences) - pairwise_alert_probabilities = joint_alert_occurrences / \ - alert_occurrences.shape[0] - - return get_jaccard_scores(alert_probabilities, pairwise_alert_probabilities) - - -def build_graph_from_occurrence(occurrence_row: pd.DataFrame, jaccard_matrix: np.ndarray, unique_alert_identifiers: List[str], - jaccard_threshold: float = 0.05) -> nx.Graph: - """ - Build a weighted graph using alert occurrence matrix and Jaccard coefficients. - """ - - present_indices = np.where(occurrence_row > 0)[0] - - G = nx.Graph() - - for idx in present_indices: - alert_desc = unique_alert_identifiers[idx] - G.add_node(alert_desc) - - for i in present_indices: - for j in present_indices: - if i != j and jaccard_matrix[i, j] >= jaccard_threshold: - alert_i = unique_alert_identifiers[i] - alert_j = unique_alert_identifiers[j] - G.add_edge(alert_i, alert_j, weight=jaccard_matrix[i, j]) - - return G - -def shape_incidents(alerts: pd.DataFrame, unique_alert_identifier: str, incident_sliding_window_size: int, statistic_sliding_window_size: int, - jaccard_threshold: float = 0.2, fingerprint_threshold: int = 5) -> List[dict]: - """ - Shape incidents from alerts. 
- """ - - incidents = [] - incident_number = 0 - - resampled_alert_counts = alerts.set_index('timestamp').resample( - f'{incident_sliding_window_size//2}s')[unique_alert_identifier].value_counts().unstack(fill_value=0) - jaccard_matrix = get_alert_jaccard_matrix( - alerts, unique_alert_identifier, statistic_sliding_window_size) - - for idx in range(resampled_alert_counts.shape[0]): - graph = build_graph_from_occurrence( - resampled_alert_counts.iloc[idx], jaccard_matrix, resampled_alert_counts.columns, jaccard_threshold=jaccard_threshold) - max_component = max(nx.connected_components(graph), key=len) - - min_starts_at = resampled_alert_counts.index[idx] - max_starts_at = min_starts_at + \ - pd.Timedelta(seconds=incident_sliding_window_size) - - local_alerts = alerts[(alerts['timestamp'] >= min_starts_at) & ( - alerts['timestamp'] <= max_starts_at)] - local_alerts = local_alerts[local_alerts[unique_alert_identifier].isin( - max_component)] - - if len(max_component) > fingerprint_threshold: - - incidents.append({ - 'incident_fingerprint': f'Incident #{incident_number}', - 'alert_fingerprints': local_alerts[unique_alert_identifier].unique().tolist(), - }) - - return incidents \ No newline at end of file diff --git a/ee/experimental/__init__.py b/ee/identitymanager/__init__.py similarity index 100% rename from ee/experimental/__init__.py rename to ee/identitymanager/__init__.py diff --git a/keep/providers/mailchimp_provider/__init__.py b/ee/identitymanager/identity_managers/__init__.py similarity index 100% rename from keep/providers/mailchimp_provider/__init__.py rename to ee/identitymanager/identity_managers/__init__.py diff --git a/ee/identitymanager/identity_managers/auth0/__init__.py b/ee/identitymanager/identity_managers/auth0/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ee/identitymanager/identity_managers/auth0/auth0_authverifier.py b/ee/identitymanager/identity_managers/auth0/auth0_authverifier.py new file mode 100644 index 
0000000000..5ee4503262 --- /dev/null +++ b/ee/identitymanager/identity_managers/auth0/auth0_authverifier.py @@ -0,0 +1,130 @@ +import logging +import os + +import jwt +import requests +from fastapi import HTTPException + +from keep.identitymanager.authenticatedentity import AuthenticatedEntity +from keep.identitymanager.authverifierbase import AuthVerifierBase +from keep.identitymanager.rbac import Admin as AdminRole + +logger = logging.getLogger(__name__) + + +def _discover_jwks_uri(auth_domain: str) -> str: + """Discover the JWKS URI via the OpenID Connect Discovery endpoint. + + Per the OpenID Connect Discovery 1.0 specification + (https://openid.net/specs/openid-connect-discovery-1_0.html#rfc.section.3), + the ``jwks_uri`` should be obtained from the provider's discovery document + at ``{issuer}/.well-known/openid-configuration``. + + Falls back to the Auth0-style ``/.well-known/jwks.json`` path when the + discovery document is unavailable or does not contain ``jwks_uri``. + """ + discovery_url = f"https://{auth_domain}/.well-known/openid-configuration" + try: + resp = requests.get(discovery_url, timeout=10) + resp.raise_for_status() + discovered_uri = resp.json().get("jwks_uri") + if discovered_uri: + return discovered_uri + logger.warning( + "OpenID discovery document at %s did not contain jwks_uri, " + "falling back to /.well-known/jwks.json", + discovery_url, + ) + except Exception: + logger.warning( + "Failed to fetch OpenID discovery document from %s, " + "falling back to /.well-known/jwks.json", + discovery_url, + exc_info=True, + ) + # Fallback: Auth0's conventional JWKS endpoint + return f"https://{auth_domain}/.well-known/jwks.json" + + +# Note: cache_keys is set to True to avoid fetching the jwks keys on every request +auth_domain = os.environ.get("AUTH0_DOMAIN") +if auth_domain: + jwks_uri = _discover_jwks_uri(auth_domain) + jwks_client = jwt.PyJWKClient( + jwks_uri, cache_keys=True, headers={"User-Agent": "keep-api"} + ) +else: + jwks_client = None 
+ + +class Auth0AuthVerifier(AuthVerifierBase): + """Handles authentication and authorization for multi tenant mode""" + + def __init__(self, scopes: list[str] = []) -> None: + # TODO: this verifier should be instantiated once and not for every endpoint/route + # to better cache the jwks keys + super().__init__(scopes) + # init once so the cache will actually work + self.auth_domain = os.environ.get("AUTH0_DOMAIN") + if not self.auth_domain: + raise Exception("Missing AUTH0_DOMAIN environment variable") + self.jwks_uri = _discover_jwks_uri(self.auth_domain) + # Note: cache_keys is set to True to avoid fetching the jwks keys on every request + # but it currently caches only per-route. After moving this auth verifier to be a singleton, we can cache it globally + self.issuer = f"https://{self.auth_domain}/" + self.auth_audience = os.environ.get("AUTH0_AUDIENCE") + + def _verify_bearer_token(self, token) -> AuthenticatedEntity: + from opentelemetry import trace + + tracer = trace.get_tracer(__name__) + with tracer.start_as_current_span("verify_bearer_token"): + if not token: + raise HTTPException(status_code=401, detail="No token provided 👈") + + # more than one tenant support + if token.startswith("keepActiveTenant"): + active_tenant, token = token.split("&") + active_tenant = active_tenant.split("=")[1] + else: + active_tenant = None + + try: + jwt_signing_key = jwks_client.get_signing_key_from_jwt(token).key + payload = jwt.decode( + token, + jwt_signing_key, + algorithms="RS256", + audience=self.auth_audience, + issuer=self.issuer, + leeway=60, + ) + # if active_tenant is set, we must verify its in the token + if active_tenant: + active_tenant_found = False + for tenant in payload.get("keep_tenant_ids", []): + if tenant.get("tenant_id") == active_tenant: + active_tenant_found = True + break + if not active_tenant_found: + self.logger.warning( + "Someone tries to use a token with a tenant that is not in the token" + ) + raise HTTPException( + status_code=401, + 
detail="Token does not contain the active tenant", + ) + tenant_id = active_tenant + else: + tenant_id = payload.get("keep_tenant_id") + role_name = payload.get( + "keep_role", AdminRole.get_name() + ) # default to admin for backwards compatibility + email = payload.get("email") + return AuthenticatedEntity(tenant_id, email, role=role_name) + except jwt.exceptions.DecodeError: + self.logger.exception("Failed to decode token") + raise HTTPException(status_code=401, detail="Token is not a valid JWT") + except Exception as e: + self.logger.exception("Failed to validate token") + raise HTTPException(status_code=401, detail=str(e)) diff --git a/ee/identitymanager/identity_managers/auth0/auth0_identitymanager.py b/ee/identitymanager/identity_managers/auth0/auth0_identitymanager.py new file mode 100644 index 0000000000..03c5dcfc7d --- /dev/null +++ b/ee/identitymanager/identity_managers/auth0/auth0_identitymanager.py @@ -0,0 +1,133 @@ +import os +import secrets + +import jwt +from fastapi import HTTPException + +from ee.identitymanager.identity_managers.auth0.auth0_authverifier import ( + Auth0AuthVerifier, +) +from ee.identitymanager.identity_managers.auth0.auth0_utils import getAuth0Client +from keep.api.models.user import User +from keep.contextmanager.contextmanager import ContextManager +from keep.identitymanager.identitymanager import BaseIdentityManager +from keep.identitymanager.rbac import Admin as AdminRole + + +class Auth0IdentityManager(BaseIdentityManager): + def __init__(self, tenant_id, context_manager: ContextManager, **kwargs): + super().__init__(tenant_id, context_manager, **kwargs) + self.logger.info("Auth0IdentityManager initialized") + self.domain = os.environ.get("AUTH0_DOMAIN") + self.client_id = os.environ.get("AUTH0_CLIENT_ID") + self.client_secret = os.environ.get("AUTH0_CLIENT_SECRET") + self.audience = f"https://{self.domain}/api/v2/" + self.jwks_client = jwt.PyJWKClient( + f"https://{self.domain}/.well-known/jwks.json", + cache_keys=True, + 
headers={"User-Agent": "keep-api"}, + ) + + def get_users(self) -> list[User]: + return self._get_users_auth0(self.tenant_id) + + def _get_users_auth0(self, tenant_id: str) -> list[User]: + auth0 = getAuth0Client() + users = auth0.users.list(q=f'app_metadata.keep_tenant_id:"{tenant_id}"') + users = [ + User( + email=user["email"], + name=user["name"], + # for backwards compatibility we return admin if no role is set + role=user.get("app_metadata", {}).get( + "keep_role", AdminRole.get_name() + ), + last_login=user.get("last_login", None), + created_at=user["created_at"], + picture=user["picture"], + ) + for user in users.get("users", []) + ] + return users + + def create_user(self, user_email: str, role: str, **kwargs) -> dict: + return self._create_user_auth0(user_email, self.tenant_id, role) + + def delete_user(self, user_email: str) -> dict: + auth0 = getAuth0Client() + users = auth0.users.list(q=f'app_metadata.keep_tenant_id:"{self.tenant_id}"') + for user in users.get("users", []): + if user["email"] == user_email: + auth0.users.delete(user["user_id"]) + return {"status": "OK"} + raise HTTPException(status_code=404, detail="User not found") + + def get_auth_verifier(self, scopes) -> Auth0AuthVerifier: + return Auth0AuthVerifier(scopes) + + def _create_user_auth0(self, user_email: str, tenant_id: str, role: str) -> dict: + auth0 = getAuth0Client() + # User email can exist in 1 tenant only for now. 
+ users = auth0.users.list(q=f'email:"{user_email}"') + if users.get("users", []): + raise HTTPException(status_code=409, detail="User already exists") + user = auth0.users.create( + { + "email": user_email, + "password": secrets.token_urlsafe(13), + "email_verified": True, + "app_metadata": {"keep_tenant_id": tenant_id, "keep_role": role}, + "connection": os.environ.get("AUTH0_DB_NAME", "keep-users"), + } + ) + user_dto = User( + email=user["email"], + name=user["name"], + # for backwards compatibility we return admin if no role is set + role=user.get("app_metadata", {}).get("keep_role", AdminRole.get_name()), + last_login=user.get("last_login", None), + created_at=user["created_at"], + picture=user["picture"], + ) + return user_dto + + def update_user(self, user_email: str, update_data: dict) -> User: + auth0 = getAuth0Client() + users = auth0.users.list( + q=f'email:"{user_email}" AND app_metadata.keep_tenant_id:"{self.tenant_id}"' + ) + if not users.get("users", []): + raise HTTPException(status_code=404, detail="User not found") + + user = users["users"][0] + user_id = user["user_id"] + + update_body = {} + if "email" in update_data and update_data["email"]: + update_body["email"] = update_data["email"] + if "password" in update_data and update_data["password"]: + update_body["password"] = update_data["password"] + if "role" in update_data and update_data["role"]: + update_body["app_metadata"] = user.get("app_metadata", {}) + update_body["app_metadata"]["keep_role"] = update_data["role"] + if "groups" in update_data and update_data["groups"]: + # Assuming groups are stored in app_metadata + if "app_metadata" not in update_body: + update_body["app_metadata"] = user.get("app_metadata", {}) + update_body["app_metadata"]["groups"] = update_data["groups"] + + try: + updated_user = auth0.users.update(user_id, update_body) + return User( + email=updated_user["email"], + name=updated_user["name"], + role=updated_user.get("app_metadata", {}).get( + "keep_role", 
AdminRole.get_name() + ), + last_login=updated_user.get("last_login", None), + created_at=updated_user["created_at"], + picture=updated_user["picture"], + ) + except Exception as e: + self.logger.error(f"Error updating user: {str(e)}") + raise HTTPException(status_code=500, detail="Failed to update user") diff --git a/keep/api/utils/auth0_utils.py b/ee/identitymanager/identity_managers/auth0/auth0_utils.py similarity index 100% rename from keep/api/utils/auth0_utils.py rename to ee/identitymanager/identity_managers/auth0/auth0_utils.py diff --git a/ee/identitymanager/identity_managers/azuread/__init__.py b/ee/identitymanager/identity_managers/azuread/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ee/identitymanager/identity_managers/azuread/azuread_authverifier.py b/ee/identitymanager/identity_managers/azuread/azuread_authverifier.py new file mode 100644 index 0000000000..a14b3973db --- /dev/null +++ b/ee/identitymanager/identity_managers/azuread/azuread_authverifier.py @@ -0,0 +1,334 @@ +import hashlib +import logging +import os +from datetime import datetime, timedelta +from typing import Any, Dict, List, Optional + +import jwt +import requests +from fastapi import Depends, HTTPException +from jwt import PyJWK +from jwt.exceptions import ( + ExpiredSignatureError, + InvalidIssuedAtError, + InvalidIssuerError, + InvalidTokenError, + MissingRequiredClaimError, +) + +from keep.api.core.db import create_user, update_user_last_sign_in, user_exists +from keep.identitymanager.authenticatedentity import AuthenticatedEntity +from keep.identitymanager.authverifierbase import AuthVerifierBase, oauth2_scheme +from keep.identitymanager.rbac import Admin as AdminRole +from keep.identitymanager.rbac import Noc as NOCRole +from keep.identitymanager.rbac import get_role_by_role_name + +logger = logging.getLogger(__name__) + + +class AzureADGroupMapper: + """Maps Azure AD groups to Keep roles""" + + def __init__(self): + # Get group IDs from environment 
variables + self.admin_group_id = os.environ.get("KEEP_AZUREAD_ADMIN_GROUP_ID") + self.noc_group_id = os.environ.get("KEEP_AZUREAD_NOC_GROUP_ID") + + if not all([self.admin_group_id, self.noc_group_id]): + raise Exception( + "Missing KEEP_AZUREAD_ADMIN_GROUP_ID or KEEP_AZUREAD_NOC_GROUP_ID environment variables" + ) + + # Define group to role mapping + self.group_role_mapping = { + self.admin_group_id: AdminRole.get_name(), + self.noc_group_id: NOCRole.get_name(), + } + + def get_role_from_groups(self, groups: List[str]) -> Optional[str]: + """ + Determine Keep role based on Azure AD group membership + Returns highest privilege role if user is in multiple groups + """ + user_roles = set() + for group_id in groups: + if role := self.group_role_mapping.get(group_id): + user_roles.add(role) + + # If user is in admin group, return admin role + if AdminRole.get_name() in user_roles: + return AdminRole.get_name() + # If user is in NOC group, return NOC role + elif NOCRole.get_name() in user_roles: + return NOCRole.get_name() + # No matching groups + return None + + +class AzureADKeysManager: + """Singleton class to manage Azure AD signing keys""" + + _instance = None + _signing_keys: Dict[str, Any] = {} + _last_updated: Optional[datetime] = None + _cache_duration = timedelta(hours=24) + + def __new__(cls): + if cls._instance is None: + cls._instance = super(AzureADKeysManager, cls).__new__(cls) + return cls._instance + + def __init__(self): + if self._last_updated is None: + self.tenant_id = os.environ.get("KEEP_AZUREAD_TENANT_ID") + if not self.tenant_id: + raise Exception("Missing KEEP_AZUREAD_TENANT_ID environment variable") + self.jwks_uri = f"https://login.microsoftonline.com/{self.tenant_id}/discovery/v2.0/keys" + self._refresh_keys() + + def _refresh_keys(self) -> None: + """Fetch signing keys from Azure AD's JWKS endpoint""" + try: + response = requests.get(self.jwks_uri) + response.raise_for_status() + jwks = response.json() + + new_keys = {} + for key in 
jwks.get("keys", []): + if key.get("use") == "sig": # Only use signing keys + logger.debug("Loading public key from certificate: %s", key) + cert_obj = PyJWK(key, "RS256") + if kid := key.get("kid"): + new_keys[kid] = cert_obj.key + + if new_keys: # Only update if we got valid keys + self._signing_keys = new_keys + self._last_updated = datetime.utcnow() + logger.info("Successfully refreshed Azure AD signing keys") + else: + logger.error("No valid signing keys found in JWKS response") + + except requests.RequestException as e: + logger.error(f"Failed to fetch signing keys: {str(e)}") + if not self._signing_keys: + raise HTTPException( + status_code=500, detail="Unable to verify tokens at this time" + ) + + def get_signing_key(self, kid: str) -> Optional[Any]: + """Get a signing key by its ID, refreshing if necessary""" + now = datetime.utcnow() + + # Refresh keys if they're expired or if we can't find the requested key + if ( + self._last_updated is None + or now - self._last_updated > self._cache_duration + or (kid not in self._signing_keys) + ): + self._refresh_keys() + + return self._signing_keys.get(kid) + + +# Initialize the keys manager globally +azure_keys_manager = AzureADKeysManager() + + +class AzureadAuthVerifier(AuthVerifierBase): + """Handles authentication and authorization for Azure AD""" + + def __init__(self, scopes: list[str] = []) -> None: + super().__init__(scopes) + # Azure AD configurations + self.tenant_id = os.environ.get("KEEP_AZUREAD_TENANT_ID") + self.client_id = os.environ.get("KEEP_AZUREAD_CLIENT_ID") + + if not all([self.tenant_id, self.client_id]): + raise Exception( + "Missing KEEP_AZUREAD_TENANT_ID or KEEP_AZUREAD_CLIENT_ID environment variable" + ) + + self.group_mapper = AzureADGroupMapper() + # Keep track of hashed tokens so we won't update the user on the same token + self.saw_tokens = set() + + def _verify_bearer_token( + self, token: str = Depends(oauth2_scheme) + ) -> AuthenticatedEntity: + """Verify the Azure AD JWT token and 
extract claims""" + + try: + # First decode without verification to get the key id (kid) + unverified_headers = jwt.get_unverified_header(token) + kid = unverified_headers.get("kid") + + if not kid: + raise HTTPException(status_code=401, detail="No key ID in token header") + + # Get the signing key from the global manager + signing_key = azure_keys_manager.get_signing_key(kid) + if not signing_key: + raise HTTPException(status_code=401, detail="Invalid token signing key") + + # For v2.0 tokens, 'appid' doesn't exist — 'azp' is used instead. + # Remove "appid" from the 'require' list so v2 tokens won't fail. + options = { + "verify_signature": True, + "verify_aud": False, # We'll validate manually below + "verify_iat": True, + "verify_exp": True, + "verify_nbf": True, + # we will validate manually since we need to support both + # v1 (sts.windows.net) and v2 (https://login.microsoftonline.com) + "verify_iss": False, + # "require" the standard claims but NOT "appid" (search for 'azp' in this code to see the comment) + "require": ["exp", "iat", "nbf", "iss", "sub"], + } + + try: + + payload = jwt.decode( + token, + key=signing_key, + algorithms=["RS256"], + options=options, + ) + + # ---- MANUAL ISSUER CHECK ---- + # Allowed issuers for v1 vs. 
v2 in the same tenant: + allowed_issuers = [ + f"https://sts.windows.net/{self.tenant_id}/", # v1 tokens + f"https://login.microsoftonline.com/{self.tenant_id}/v2.0", # v2 tokens + ] + issuer_in_token = payload.get("iss") + if issuer_in_token not in allowed_issuers: + raise HTTPException(status_code=401, detail="Invalid token issuer") + + # Check client ID: v1 -> 'appid', v2 -> 'azp' + client_id_in_token = payload.get("appid") or payload.get("azp") + + if not client_id_in_token: + raise HTTPException( + status_code=401, detail="No client ID (appid/azp) in token" + ) + + if client_id_in_token != self.client_id: + raise HTTPException( + status_code=401, + detail="Invalid token application ID (appid/azp)", + ) + + # Validate the audience + allowed_aud = [ + f"api://{self.client_id}", # v1 tokens + f"{self.client_id}", # v2 tokens + ] + if payload.get("aud") not in allowed_aud: + self.logger.error( + f"Invalid token audience: {payload.get('aud')}", + extra={ + "tenant_id": self.tenant_id, + "audience": payload.get("aud"), + "allowed_aud": allowed_aud, + }, + ) + raise HTTPException( + status_code=401, detail="Invalid token audience" + ) + + except ExpiredSignatureError: + raise HTTPException(status_code=401, detail="Token has expired") + except InvalidIssuerError: + raise HTTPException(status_code=401, detail="Invalid token issuer") + except (InvalidIssuedAtError, MissingRequiredClaimError): + raise HTTPException( + status_code=401, detail="Token is missing required claims" + ) + except InvalidTokenError as e: + logger.error(f"Token validation failed: {str(e)}") + raise HTTPException(status_code=401, detail="Invalid token") + + # Extract relevant claims + tenant_id = payload.get("tid") + email = ( + payload.get("email") + or payload.get("preferred_username") + or payload.get("unique_name") + ) + + if not all([tenant_id, email]): + raise HTTPException(status_code=401, detail="Missing required claims") + + # Clean up email if it's in the live.com#email@domain.com format 
+ if "#" in email: + email = email.split("#")[1] + + # Get groups from token + groups = payload.get("groups", []) + + # Map groups to role + role_name = self.group_mapper.get_role_from_groups(groups) + if not role_name: + self.logger.warning( + f"User {email} is not a member of any authorized groups for Keep", + extra={ + "tenant_id": tenant_id, + "groups": groups, + }, + ) + raise HTTPException( + status_code=403, + detail="User not a member of any authorized groups for Keep", + ) + + # Validate role scopes + role = get_role_by_role_name(role_name) + if not role.has_scopes(self.scopes): + self.logger.warning( + f"Role {role_name} does not have required permissions", + extra={ + "tenant_id": tenant_id, + "role": role_name, + }, + ) + raise HTTPException( + status_code=403, + detail=f"Role {role_name} does not have required permissions", + ) + + # Auto-provisioning logic + hashed_token = hashlib.sha256(token.encode()).hexdigest() + if hashed_token not in self.saw_tokens and not user_exists( + tenant_id, email + ): + create_user( + tenant_id=tenant_id, username=email, role=role_name, password="" + ) + + if hashed_token not in self.saw_tokens: + update_user_last_sign_in(tenant_id, email) + self.saw_tokens.add(hashed_token) + + return AuthenticatedEntity(tenant_id, email, None, role_name) + + except HTTPException: + # Re-raise known HTTP errors + self.logger.exception("Token validation failed (HTTPException)") + raise + except Exception: + self.logger.exception("Token validation failed") + raise HTTPException(status_code=401, detail="Invalid token") + + def _authorize(self, authenticated_entity: AuthenticatedEntity) -> None: + """ + Authorize the authenticated entity against required scopes + """ + if not authenticated_entity.role: + raise HTTPException(status_code=403, detail="No role assigned") + + role = get_role_by_role_name(authenticated_entity.role) + if not role.has_scopes(self.scopes): + raise HTTPException( + status_code=403, + detail="You don't have the 
required permissions to access this resource", + ) diff --git a/ee/identitymanager/identity_managers/azuread/azuread_identitymanager.py b/ee/identitymanager/identity_managers/azuread/azuread_identitymanager.py new file mode 100644 index 0000000000..373c08bb4d --- /dev/null +++ b/ee/identitymanager/identity_managers/azuread/azuread_identitymanager.py @@ -0,0 +1,33 @@ +from ee.identitymanager.identity_managers.azuread.azuread_authverifier import ( + AzureadAuthVerifier, +) +from keep.api.models.user import User +from keep.contextmanager.contextmanager import ContextManager +from keep.identitymanager.identity_managers.db.db_identitymanager import ( + DbIdentityManager, +) +from keep.identitymanager.identitymanager import BaseIdentityManager + + +class AzureadIdentityManager(BaseIdentityManager): + def __init__(self, tenant_id, context_manager: ContextManager, **kwargs): + super().__init__(tenant_id, context_manager, **kwargs) + self.db_identity_manager = DbIdentityManager( + tenant_id, context_manager, **kwargs + ) + + def get_users(self) -> list[User]: + # we keep the azuread users in the db + return self.db_identity_manager.get_users(self.tenant_id) + + def create_user(self, user_email: str, role: str, **kwargs) -> dict: + return None + + def delete_user(self, user_email: str) -> dict: + raise NotImplementedError("AzureadIdentityManager.delete_user") + + def get_auth_verifier(self, scopes) -> AzureadAuthVerifier: + return AzureadAuthVerifier(scopes) + + def update_user(self, user_email: str, update_data: dict) -> User: + raise NotImplementedError("AzureadIdentityManager.update_user") diff --git a/ee/identitymanager/identity_managers/keycloak/__init__.py b/ee/identitymanager/identity_managers/keycloak/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ee/identitymanager/identity_managers/keycloak/keycloak_authverifier.py b/ee/identitymanager/identity_managers/keycloak/keycloak_authverifier.py new file mode 100644 index 0000000000..8bc4179ae5 
--- /dev/null +++ b/ee/identitymanager/identity_managers/keycloak/keycloak_authverifier.py @@ -0,0 +1,393 @@ +import logging +import os + +from fastapi import Depends, HTTPException + +from keep.api.core.config import config +from keep.api.core.db import create_tenant, get_tenants +from keep.identitymanager.authenticatedentity import AuthenticatedEntity +from keep.identitymanager.authverifierbase import AuthVerifierBase, oauth2_scheme +from keep.identitymanager.rbac import Roles +from keycloak import KeycloakOpenID, KeycloakOpenIDConnection +from keycloak.connection import ConnectionManager +from keycloak.keycloak_uma import KeycloakUMA +from keycloak.uma_permissions import UMAPermission + +logger = logging.getLogger(__name__) + + +# PATCH TO MONKEYPATCH KEYCLOAK VERIFY BUG +# https://github.com/marcospereirampj/python-keycloak/issues/645 + +original_init = ConnectionManager.__init__ + + +def patched_init( + self, + base_url: str, + headers: dict = None, + timeout: int = 60, + verify: bool = None, + proxies: dict = None, +): + if verify is None: + verify = os.environ.get("KEYCLOAK_VERIFY_CERT", "true").lower() == "true" + logger.warning( + "Using KEYCLOAK_VERIFY_CERT environment variable to set verify. 
", + extra={"KEYCLOAK_VERIFY_CERT": verify}, + ) + + if headers is None: + headers = {} + original_init(self, base_url, headers, timeout, verify, proxies) + + +ConnectionManager.__init__ = patched_init + + +class KeycloakAuthVerifier(AuthVerifierBase): + """Handles authentication and authorization for Keycloak""" + + def __init__(self, scopes: list[str] = []) -> None: + super().__init__(scopes) + self.keycloak_url = os.environ.get("KEYCLOAK_URL") + self.keycloak_realm = os.environ.get("KEYCLOAK_REALM") + self.keycloak_client_id = os.environ.get("KEYCLOAK_CLIENT_ID") + self.keycloak_audience = os.environ.get("KEYCLOAK_AUDIENCE") + self.keycloak_verify_cert = ( + os.environ.get("KEYCLOAK_VERIFY_CERT", "true").lower() == "true" + ) + if ( + not self.keycloak_url + or not self.keycloak_realm + or not self.keycloak_client_id + ): + raise Exception( + "Missing KEYCLOAK_URL, KEYCLOAK_REALM or KEYCLOAK_CLIENT_ID environment variable" + ) + + self.keycloak_client = KeycloakOpenID( + server_url=self.keycloak_url, + realm_name=self.keycloak_realm, + client_id=self.keycloak_client_id, + client_secret_key=os.environ.get("KEYCLOAK_CLIENT_SECRET"), + verify=self.keycloak_verify_cert, + ) + self.keycloak_openid_connection = KeycloakOpenIDConnection( + server_url=self.keycloak_url, + realm_name=self.keycloak_realm, + client_id=self.keycloak_client_id, + client_secret_key=os.environ.get("KEYCLOAK_CLIENT_SECRET"), + verify=self.keycloak_verify_cert, + ) + self.keycloak_uma = KeycloakUMA(connection=self.keycloak_openid_connection) + # will be populated in on_start of the identity manager + self.protected_resource = None + self.roles_from_groups = config( + "KEYCLOAK_ROLES_FROM_GROUPS", default=False, cast=bool + ) + self.groups_claims = config("KEYCLOAK_GROUPS_CLAIM", default="groups") + self.groups_claims_admin = config( + "KEYCLOAK_GROUPS_CLAIM_ADMIN", default="admin" + ) + self.groups_claims_noc = config("KEYCLOAK_GROUPS_CLAIM_NOC", default="noc") + self.groups_claims_webhook = 
config( + "KEYCLOAK_GROUPS_CLAIM_WEBHOOK", default="webhook" + ) + self.groups_org_prefix = config( + "KEYCLOAK_GROUPS_ORG_PREFIX", default="keep" + ).lower() + self.keycloak_roles = { + self.groups_claims_admin: Roles.ADMIN, + self.groups_claims_noc: Roles.NOC, + self.groups_claims_webhook: Roles.WEBHOOK, + } + if self.roles_from_groups: + self.keycloak_multi_org = True + else: + self.keycloak_multi_org = False + + self.groups_separator = os.environ.get("KEYCLOAK_GROUPS_SEPERATOR", "-").lower() + self._tenants = [] + + @property + def tenants(self): + if not self._tenants: + tenants = get_tenants() + + self._tenants = { + tenant.name: { + "tenant_id": tenant.id, + "tenant_logo_url": ( + tenant.configuration.get("logo_url") + if tenant.configuration + else None + ), + } + for tenant in tenants + } + + return self._tenants + + def _reload_tenants(self): + self._tenants = [] + # access the property to reload the tenants + tenants = self.tenants + # log + self.logger.info("Reloaded tenants", extra={"tenants": tenants}) + + def get_org_name_by_tenant_id(self, tenant_id): + for org_name, org_tenant_id in self.tenants.items(): + if org_tenant_id.get("tenant_id") == tenant_id: + return org_name + + self.logger.error("Tenant id not found", extra={"tenant_id": tenant_id}) + raise Exception("Org not found") + + def _check_if_group_represents_org(self, group_name: str): + # if must start with the group prefix + if not group_name.startswith( + self.groups_org_prefix + ) and not group_name.startswith("/" + self.groups_org_prefix): + return False + + # TODO: dynamic roles + orgs + + # admin + if group_name.endswith(self.groups_claims_admin): + return True + + # noc + if group_name.endswith(self.groups_claims_noc): + return True + + # webhook + if group_name.endswith(self.groups_claims_webhook): + return True + + # if not, its not a group that represents an org + return False + + def _get_org_name(self, group_name): + # first, keycloak groups starts with "/" + if 
group_name.startswith("/"): + group_name = group_name[1:] + + # second, trim the role + org_name = self.groups_separator.join( + group_name.split(self.groups_separator)[0:-1] + ) + + return org_name + + def _get_role_in_org(self, user_groups, org_name): + # for the org_name (e.g. keep-org-a) iterate over the groups and find the role + # e.g. /org-a-admin, /org-a-noc, /org-a-webhook + # we want to iterate from the "strongest" to the "weakest" role + for role, keep_role in self.keycloak_roles.items(): + for group in user_groups: + group_lower = group.lower() + if org_name in group_lower and role in group_lower: + return keep_role.value + return None + + def _verify_bearer_token( + self, token: str = Depends(oauth2_scheme) + ) -> AuthenticatedEntity: + # verify keycloak token + try: + # more than one tenant support + if token.startswith("keepActiveTenant"): + active_tenant, token = token.split("&") + active_tenant = active_tenant.split("=")[1] + else: + active_tenant = None + payload = self.keycloak_client.decode_token(token, validate=True) + except Exception as e: + if "Expired" in str(e): + raise HTTPException(status_code=401, detail="Expired Keycloak token") + raise HTTPException(status_code=401, detail="Invalid Keycloak token") + tenant_id = payload.get("keep_tenant_id") + email = payload.get("preferred_username") + org_id = payload.get("active_organization", {}).get("id") + org_realm = payload.get("active_organization", {}).get("name") + if org_id is None or org_realm is None: + logger.warning( + "Invalid Keycloak configuration - no org information for user. Check organization mapper: https://github.com/keephq/keep/blob/main/keycloak/keep-realm.json#L93" + ) + + # this allows more than one tenant to be configured in the same keycloak realm + # todo: support dynamic roles + user_orgs = {} + if self.roles_from_groups: + self.logger.info("Using roles from groups") + # get roles from groups + # e.g. 
+ # "group-keeps": [ + # "/ORG-A-USERS", + # "/ORG-B-USERS", + # "/org-users" + # ], + groups = payload.get(self.groups_claims, []) + groups_that_represent_orgs = [] + # first, create tenants if they are not exists (should be happen once, new group) + for group in groups: + # first, check if its an org group (e.g. keep-org-a) + group_lower = group.lower() + if self._check_if_group_represents_org(group_name=group_lower): + # check if its the configuration + org_name = self._get_org_name(group_lower) + groups_that_represent_orgs.append(group_lower) + if org_name not in self.tenants: + self.logger.info("Creating tenant") + org_tenant_id = create_tenant(tenant_name=org_name) + # so it won't be + self.tenants[org_name] = { + "tenant_id": org_tenant_id, + "tenant_logo_url": None, + } + self.logger.info("Tenant created") + # this will be returned to the UI + user_orgs[org_name] = self.tenants.get(org_name) + + # TODO: fix + if active_tenant: + # get the active_tenant grou + org_name = self.get_org_name_by_tenant_id(active_tenant) + tenant_id = active_tenant + if not tenant_id: + self.logger.warning( + "Tenant id not found, reloading tenants from db" + ) + self._reload_tenants() + tenant_id = self.get_org_name_by_tenant_id(active_tenant) + # if still + if not tenant_id: + self.logger.error( + "Tenant id not found, raising exception", + extra={"org_name": org_name}, + ) + raise HTTPException( + status_code=401, + detail="Invalid Keycloak token - could not find any group that represents the org and the role", + ) + role = self._get_role_in_org(groups, org_name) + if not role: + raise HTTPException( + status_code=401, + detail="Invalid Keycloak token - could not find any group that represents the org and the role", + ) + # if no active tenant, we take the first + else: + current_tenant_group = groups_that_represent_orgs[0] + org_name = self._get_org_name(current_tenant_group) + tenant_id = self.tenants.get(org_name).get("tenant_id") + if not tenant_id: + self.logger.warning( 
+ "Tenant id not found, reloading tenants from db" + ) + self._reload_tenants() + tenant_id = self.tenants.get(org_name).get("tenant_id") + # if still + if not tenant_id: + self.logger.error( + "Tenant id not found, raising exception", + extra={"org_name": org_name}, + ) + raise HTTPException( + status_code=401, + detail="Invalid Keycloak token - could not find any group that represents the org and the role", + ) + if self.groups_claims_admin in current_tenant_group: + role = "admin" + elif self.groups_claims_noc in current_tenant_group: + role = "noc" + elif self.groups_claims_webhook in current_tenant_group: + role = "webhook" + else: + raise HTTPException( + status_code=401, + detail="Invalid Keycloak token - no role in groups", + ) + # Keycloak single tenant + else: + role = ( + payload.get("resource_access", {}) + .get(self.keycloak_client_id, {}) + .get("roles", []) + ) + # filter out uma_protection + role = [r for r in role if not r.startswith("uma_protection")] + if not role: + raise HTTPException( + status_code=401, detail="Invalid Keycloak token - no role" + ) + + role = role[0] + + # finally, check if the role is in the allowed roles + authenticated_entity = AuthenticatedEntity( + tenant_id, + email, + None, + role, + org_id=org_id, + org_realm=org_realm, + token=token, + ) + if user_orgs: + authenticated_entity.user_orgs = user_orgs + + return authenticated_entity + + def _authorize(self, authenticated_entity: AuthenticatedEntity) -> None: + + # multi org does not support UMA for now: + if self.keycloak_multi_org: + return super()._authorize(authenticated_entity) + + # API key auth does not carry a Keycloak token; fall back to RBAC + if not getattr(authenticated_entity, "token", None): + return super()._authorize(authenticated_entity) + + # for single tenant Keycloaks, use Keycloak's UMA to authorize + try: + permission = UMAPermission( + resource=self.protected_resource, + scope=self.scopes[0], # todo: handle multiple scopes per resource + ) + 
self.logger.info(f"Checking permission {permission}") + allowed = self.keycloak_uma.permissions_check( + token=authenticated_entity.token, permissions=[permission] + ) + self.logger.info(f"Permission check result: {allowed}") + if not allowed: + raise HTTPException(status_code=403, detail="Permission check failed") + # secure fallback + except Exception as e: + raise HTTPException( + status_code=403, detail="Permission check failed - " + str(e) + ) + return allowed + + def authorize_resource( + self, resource_type, resource_id, authenticated_entity: AuthenticatedEntity + ) -> None: + # API key auth does not carry a Keycloak token; skip per-resource UMA check + if not getattr(authenticated_entity, "token", None): + return + + # use Keycloak's UMA to authorize + try: + permission = UMAPermission( + resource=resource_id, + ) + allowed = self.keycloak_uma.permissions_check( + token=authenticated_entity.token, permissions=[permission] + ) + if not allowed: + raise HTTPException(status_code=401, detail="Permission check failed") + # secure fallback + except Exception: + raise HTTPException(status_code=401, detail="Permission check failed") + return allowed diff --git a/ee/identitymanager/identity_managers/keycloak/keycloak_identitymanager.py b/ee/identitymanager/identity_managers/keycloak/keycloak_identitymanager.py new file mode 100644 index 0000000000..6224338327 --- /dev/null +++ b/ee/identitymanager/identity_managers/keycloak/keycloak_identitymanager.py @@ -0,0 +1,1199 @@ +import json +import os + +import requests +from fastapi import HTTPException +from fastapi.routing import APIRoute +from starlette.routing import Route + +from ee.identitymanager.identity_managers.keycloak.keycloak_authverifier import ( + KeycloakAuthVerifier, +) +from keep.api.core.config import config +from keep.api.core.db import get_resource_ids_by_resource_type +from keep.api.models.user import Group, PermissionEntity, ResourcePermission, Role, User +from keep.contextmanager.contextmanager 
import ContextManager +from keep.identitymanager.authenticatedentity import AuthenticatedEntity +from keep.identitymanager.authverifierbase import AuthVerifierBase, get_all_scopes +from keep.identitymanager.identitymanager import PREDEFINED_ROLES, BaseIdentityManager +from keycloak import KeycloakAdmin +from keycloak.exceptions import KeycloakDeleteError, KeycloakGetError, KeycloakPostError +from keycloak.openid_connection import KeycloakOpenIDConnection + +# Some good sources on this topic: +# 1. https://stackoverflow.com/questions/42186537/resources-scopes-permissions-and-policies-in-keycloak +# 2. MUST READ - https://www.keycloak.org/docs/24.0.4/authorization_services/ +# 3. ADMIN REST API - https://www.keycloak.org/docs-api/22.0.1/rest-api/index.html +# 4. (TODO) PROTECTION API - https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_protection_api + + +class KeycloakIdentityManager(BaseIdentityManager): + """ + RESOURCES = { + "preset": { + "table": "preset", + "uid": "id", + }, + "incident": { + "table": "incident", + "uid": "id", + }, + } + """ + + RESOURCES = {} + + def __init__(self, tenant_id, context_manager: ContextManager, **kwargs): + super().__init__(tenant_id, context_manager, **kwargs) + self.server_url = os.environ.get("KEYCLOAK_URL") + self.keycloak_verify_cert = ( + os.environ.get("KEYCLOAK_VERIFY_CERT", "true").lower() == "true" + ) + try: + self.keycloak_admin = KeycloakAdmin( + server_url=os.environ["KEYCLOAK_URL"] + "/admin", + username=os.environ.get("KEYCLOAK_ADMIN_USER"), + password=os.environ.get("KEYCLOAK_ADMIN_PASSWORD"), + realm_name=os.environ["KEYCLOAK_REALM"], + verify=self.keycloak_verify_cert, + ) + self.client_id = self.keycloak_admin.get_client_id( + os.environ["KEYCLOAK_CLIENT_ID"] + ) + self.keycloak_id_connection = KeycloakOpenIDConnection( + server_url=os.environ["KEYCLOAK_URL"], + client_id=os.environ["KEYCLOAK_CLIENT_ID"], + realm_name=os.environ["KEYCLOAK_REALM"], + 
client_secret_key=os.environ["KEYCLOAK_CLIENT_SECRET"], + verify=self.keycloak_verify_cert, + ) + + self.admin_url = f'{os.environ["KEYCLOAK_URL"]}/admin/realms/{os.environ["KEYCLOAK_REALM"]}/clients/{self.client_id}' + self.admin_url_without_client = f'{os.environ["KEYCLOAK_URL"]}/admin/realms/{os.environ["KEYCLOAK_REALM"]}' + self.realm = os.environ["KEYCLOAK_REALM"] + # if Keep controls the Keycloak server so it have event listener + # for future use + self.keep_controlled_keycloak = ( + os.environ.get("KEYCLOAK_KEEP_CONTROLLED", "false") == "true" + ) + # Does ABAC is enabled + self.abac_enabled = ( + os.environ.get("KEYCLOAK_ABAC_ENABLED", "true") == "true" + ) + + self.keycloak_multi_org = config( + "KEYCLOAK_ROLES_FROM_GROUPS", default=False, cast=bool + ) + + except Exception as e: + self.logger.error( + "Failed to initialize Keycloak Identity Manager: %s", str(e) + ) + raise + self.logger.info("Keycloak Identity Manager initialized") + + def on_start(self, app) -> None: + # if the on start process is disabled: + if os.environ.get("SKIP_KEYCLOAK_ONSTART", "false") == "true": + self.logger.info("Skipping keycloak on start") + return + # first, create all the scopes + for scope in get_all_scopes(): + self.logger.info("Creating scope: %s", scope) + self.create_scope(scope) + self.logger.info("Scope created: %s", scope) + # create resource for each route + for route in app.routes: + self.logger.info("Creating resource for route %s", route.path) + # fetch the scopes for this route from the auth dependency + if isinstance(route, Route) and not isinstance(route, APIRoute): + self.logger.info("Skipping route: %s", route.path) + continue + if not route.dependant.dependencies: + self.logger.warning("Skipping unprotected route: %s", route.path) + continue + + scopes = [] + for dep in route.dependant.dependencies: + # for routes that have other dependencies + if not isinstance(dep.cache_key[0], KeycloakAuthVerifier): + continue + scopes = dep.cache_key[0].scopes + # 
this is the KeycloakAuthVerifier dependency :) + methods = list(route.methods) + if len(methods) > 1: + self.logger.warning( + "Keep does not support multiple methods for a single route", + ) + continue + protected_resource = methods[0] + " " + route.path + dep.cache_key[0].protected_resource = protected_resource + break + + # protected route but without scopes + if not scopes: + self.logger.warning("Route without scopes: %s", route.path) + + self.create_resource( + protected_resource, scopes=scopes, resource_type="keep_route" + ) + self.logger.info("Resource created for route: %s", route.path) + + # another thing we need to do is to add a /auth/user/orgs endpoint that will + # return the orgs of the user for TenantSwitcher in the UI + if self.keycloak_multi_org: + self.logger.info("Creating /auth/user/orgs endpoint") + from fastapi import Depends + + from keep.identitymanager.identitymanagerfactory import ( + IdentityManagerFactory, + ) + + # we want to add it only once to skip endless loop + current_routes = [route.path for route in app.routes] + if "/auth/user/orgs" not in current_routes: + self.logger.info("Adding /auth/user/orgs endpoint") + + # add the endpoint + @app.get("/auth/user/orgs") + def tenant( + authenticated_entity: AuthenticatedEntity = Depends( + IdentityManagerFactory.get_auth_verifier([]) + ), + ): + tenants = authenticated_entity.user_orgs + return tenants + + # create resource for each object + if self.abac_enabled: + for resource_type, resource_type_data in self.RESOURCES.items(): + self.logger.info("Creating resource for object %s", resource_type) + resources = get_resource_ids_by_resource_type( + tenant_id=self.tenant_id, + table_name=resource_type_data["table"], + uid=resource_type_data["uid"], + ) + for resource_id in resources: + resource_name = f"{resource_type}_{resource_id}" + resource_type_name = f"keep_{resource_type}" + self.create_resource( + resource_name=resource_name, + scopes=[], + resource_type=resource_type_name, + ) + 
self.logger.info("Resource created for object: %s", resource_type) + for role in PREDEFINED_ROLES: + self.logger.info("Creating role: %s", role) + self.create_role(role, predefined=True) + self.logger.info("Role created: %s", role) + + def _scope_name_to_id(self, all_scopes, scope_name: str) -> str: + # if its ":*": + if scope_name.split(":")[1] == "*": + scope_verb = scope_name.split(":")[0] + scope_ids = [ + scope["id"] + for scope in all_scopes + if scope["name"].startswith(scope_verb) + ] + return scope_ids + else: + scope = next( + (scope for scope in all_scopes if scope["name"] == scope_name), + None, + ) + if not scope: + self.logger.error( + "Scope %s not found in Keycloak", + scope_name, + extra={"scopes": all_scopes}, + ) + return [] + return [scope["id"]] + + def get_permission_by_name(self, permission_name): + permissions = self.keycloak_admin.get_client_authz_permissions(self.client_id) + permission = next( + ( + permission + for permission in permissions + if permission["name"] == permission_name + ), + None, + ) + return permission + + def create_scope_based_permission(self, role: Role, policy_id: str) -> None: + try: + scopes = role.scopes + all_scopes = self.keycloak_admin.get_client_authz_scopes(self.client_id) + scopes_ids = set() + for scope in scopes: + scope_ids = self._scope_name_to_id(all_scopes, scope) + scopes_ids.update(scope_ids) + resp = self.keycloak_admin.create_client_authz_scope_permission( + client_id=self.client_id, + payload={ + "name": f"Permission for {role.name}", + "scopes": list(scopes_ids), + "policies": [policy_id], + "resources": [], + "decisionStrategy": "Affirmative".upper(), + "type": "scope", + "logic": "POSITIVE", + }, + ) + return resp + except KeycloakPostError as e: + # if the permissions already exists, just update it + if "already exists" in str(e): + self.logger.info("Scope based permission already exists in Keycloak") + # let's try to update + try: + permission = self.get_permission_by_name( + f"Permission for 
{role.name}" + ) + permission_id = permission.get("id") + resp = self.keycloak_admin.connection.raw_put( + path=f"{self.admin_url}/authz/resource-server/permission/scope/{permission_id}", + client_id=self.client_id, + data=json.dumps( + { + "name": f"Permission for {role.name}", + "scopes": list(scopes_ids), + "policies": [policy_id], + "resources": [], + "decisionStrategy": "Affirmative".upper(), + "type": "scope", + "logic": "POSITIVE", + } + ), + ) + except Exception: + pass + else: + self.logger.error( + "Failed to create scope based permission in Keycloak: %s", str(e) + ) + raise HTTPException( + status_code=500, detail="Failed to create scope based permission" + ) + + def create_scope(self, scope: str) -> None: + try: + self.keycloak_admin.create_client_authz_scopes( + self.client_id, + { + "name": scope, + "displayName": f"Scope for {scope}", + }, + ) + except KeycloakPostError as e: + self.logger.error("Failed to create scopes in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create scopes") + + def create_role(self, role: Role, predefined=False) -> str: + try: + role_name = self.keycloak_admin.create_client_role( + self.client_id, + { + "name": role.name, + "description": f"Role for {role.name}", + # we will use this to identify the role as predefined + "attributes": { + "predefined": [str(predefined).lower()], + }, + }, + skip_exists=True, + ) + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role_name) + # create the role policy + policy_id = self.create_role_policy(role_id, role.name, role.description) + # create the scope based permission + self.create_scope_based_permission(role, policy_id) + return role_id + except KeycloakPostError as e: + if "already exists" in str(e): + self.logger.info("Role already exists in Keycloak") + # its ok! 
+ pass + else: + self.logger.error("Failed to create roles in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create roles") + + def update_role(self, role_id: str, role: Role) -> str: + # just update the policy + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role.name) + scopes = role.scopes + all_scopes = self.keycloak_admin.get_client_authz_scopes(self.client_id) + scopes_ids = set() + for scope in scopes: + scope_ids = self._scope_name_to_id(all_scopes, scope) + scopes_ids.update(scope_ids) + # get the scope-based permission + permissions = self.keycloak_admin.get_client_authz_permissions(self.client_id) + permission = next( + ( + permission + for permission in permissions + if permission["name"] == f"Permission for {role.name}" + ), + None, + ) + if not permission: + raise HTTPException(status_code=404, detail="Permission not found") + permission_id = permission["id"] + permission["scopes"] = list(scopes_ids) + resp = self.keycloak_admin.connection.raw_put( + f"{self.admin_url}/authz/resource-server/permission/scope/{permission_id}", + data=json.dumps(permission), + ) + resp.raise_for_status() + return role_id + + def create_role_policy(self, role_id: str, role_name: str, role_description) -> str: + try: + resp = self.keycloak_admin.connection.raw_post( + f"{self.admin_url}/authz/resource-server/policy/role", + data=json.dumps( + { + "name": f"Allow {role_name} to {role_description}", + "description": f"Allow {role_name} to {role_description}", # future use + "roles": [{"id": role_id, "required": False}], + "logic": "POSITIVE", + "fetchRoles": False, + } + ), + ) + resp.raise_for_status() + resp = resp.json() + return resp.get("id") + except requests.exceptions.HTTPError as e: + if "Conflict" in str(e): + self.logger.info("Policy already exists in Keycloak") + # get its id + policies = self.get_policies() + # find by name + policy = next( + ( + policy + for policy in policies + if policy["name"] == f"Allow 
{role_name} to {role_description}" + ), + None, + ) + return policy["id"] + else: + self.logger.error("Failed to create policies in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create policies") + except Exception as e: + self.logger.error("Failed to create policies in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create policies") + + @property + def support_sso(self) -> bool: + return True + + def get_sso_providers(self) -> list[str]: + return [] + + def get_sso_wizard_url(self, authenticated_entity: AuthenticatedEntity) -> str: + tenant_realm = authenticated_entity.org_realm + org_id = authenticated_entity.org_id + return f"{self.server_url}realms/{tenant_realm}/wizard/?org_id={org_id}/#iss={self.server_url}/realms/{tenant_realm}" + + def get_users(self) -> list[User]: + try: + # TODO: query only users that Keep created (so not show all LDAP users) + users = self.keycloak_admin.get_users({}) + users = [user for user in users if "firstName" in user] + + users_dto = [] + for user in users: + # todo: should be more efficient + groups = self.keycloak_admin.get_user_groups(user["id"]) + groups = [ + { + "id": group["id"], + "name": group["name"], + } + for group in groups + ] + role = self.get_user_current_role(user_id=user.get("id")) + user_dto = User( + email=user.get("email", ""), + name=user.get("firstName", ""), + role=role, + created_at=user.get("createdTimestamp", ""), + ldap=( + True + if user.get("attributes", {}).get("LDAP_ID", False) + else False + ), + last_login=user.get("attributes", {}).get("last-login", [""])[0], + groups=groups, + ) + users_dto.append(user_dto) + return users_dto + except KeycloakGetError as e: + self.logger.error("Failed to fetch users from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to fetch users") + + def create_user( + self, + user_email: str, + user_name: str, + password: str, + role: list[str], + groups: list[str], + ) -> 
dict: + try: + user_data = { + "username": user_email, + "email": user_email, + "enabled": True, + "firstName": user_name, + "lastName": user_name, + "emailVerified": True, + } + if password: + user_data["credentials"] = [ + {"type": "password", "value": password, "temporary": False} + ] + + user_id = self.keycloak_admin.create_user(user_data) + if role: + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role) + self.keycloak_admin.assign_client_role( + client_id=self.client_id, + user_id=user_id, + roles=[{"id": role_id, "name": role}], + ) + for group in groups: + self.add_user_to_group(user_id=user_id, group=group) + + return { + "status": "success", + "message": "User created successfully", + "user_id": user_id, + } + except KeycloakPostError as e: + if "User exists" in str(e): + self.logger.error( + "Failed to create user - user %s already exists", user_email + ) + raise HTTPException( + status_code=409, + detail=f"Failed to create user - user {user_email} already exists", + ) + self.logger.error("Failed to create user in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create user") + + def get_user_id_by_email(self, user_email: str) -> str: + user_id = self.keycloak_admin.get_users(query={"email": user_email}) + if not user_id: + self.logger.error("User does not exists") + raise HTTPException(status_code=404, detail="User does not exists") + elif len(user_id) > 1: + self.logger.error("Multiple users found") + raise HTTPException( + status_code=500, detail="Multiple users found, please contact admin" + ) + user_id = user_id[0]["id"] + return user_id + + def get_user_current_role(self, user_id: str) -> str: + current_role = ( + self.keycloak_admin.connection.raw_get( + self.admin_url_without_client + f"/users/{user_id}/role-mappings" + ) + .json() + .get("clientMappings", {}) + .get(self.realm, {}) + .get("mappings") + ) + + if current_role: + # remove uma protection + current_role = [ + role for role in 
current_role if role["name"] != "uma_protection" + ] + # if uma_protection is the only role, then the user has no role + if current_role: + return current_role[0]["name"] + else: + return None + else: + return None + + def add_user_to_group(self, user_id: str, group: str): + resp = self.keycloak_admin.connection.raw_put( + f"{self.admin_url_without_client}/users/{user_id}/groups/{group}", + data=json.dumps({}), + ) + resp.raise_for_status() + + def update_user(self, user_email: str, update_data: dict) -> dict: + try: + user_id = self.get_user_id_by_email(user_email) + if "role" in update_data and update_data["role"]: + role = update_data["role"] + # get current role and understand if needs to be updated: + current_role = self.get_user_current_role(user_id) + # update the role only if its different than current + # TODO: more than one role + if current_role != role: + role_id = self.keycloak_admin.get_client_role_id( + self.client_id, role + ) + if not role_id: + self.logger.error("Role does not exists") + raise HTTPException( + status_code=404, detail="Role does not exists" + ) + self.keycloak_admin.assign_client_role( + client_id=self.client_id, + user_id=user_id, + roles=[{"id": role_id, "name": role}], + ) + if "groups" in update_data and update_data["groups"]: + # get the current groups + groups = self.keycloak_admin.get_user_groups(user_id) + groups_ids = [g.get("id") for g in groups] + # calc with groups needs to be removed and which to be added + groups_to_remove = [ + group_id + for group_id in groups_ids + if group_id not in update_data["groups"] + ] + + groups_to_add = [ + group for group in update_data["groups"] if group not in groups_ids + ] + # remove + for group in groups_to_remove: + self.logger.info("Leaving group") + resp = self.keycloak_admin.connection.raw_delete( + f"{self.admin_url_without_client}/users/{user_id}/groups/{group}" + ) + resp.raise_for_status() + self.logger.info("Left group") + # add + for group in groups_to_add: + 
self.logger.info("Joining group") + self.add_user_to_group(user_id=user_id, group=group) + self.logger.info("Joined group") + return {"status": "success", "message": "User updated successfully"} + except KeycloakPostError as e: + self.logger.error("Failed to update user in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to update user") + + def delete_user(self, user_email: str) -> dict: + try: + user_id = self.get_user_id_by_email(user_email) + self.keycloak_admin.delete_user(user_id) + # delete the policy for the user (if not implicitly deleted?) + return {"status": "success", "message": "User deleted successfully"} + except KeycloakDeleteError as e: + self.logger.error("Failed to delete user from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to delete user") + + def get_auth_verifier(self, scopes: list) -> AuthVerifierBase: + return KeycloakAuthVerifier(scopes) + + def create_resource( + self, + resource_name: str, + scopes: list[str] = [], + resource_type="keep_generic", + attributes={}, + ) -> None: + resource = { + "name": resource_name, + "displayName": f"Resource for {resource_name}", + "type": "urn:keep:resources:" + resource_type, + "scopes": [{"name": scope} for scope in scopes], + "attributes": attributes, + } + try: + self.keycloak_admin.create_client_authz_resource(self.client_id, resource) + except KeycloakPostError as e: + if "already exists" in str(e): + self.logger.info("Resource already exists in Keycloak") + pass + else: + self.logger.error("Failed to create resource in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create resource") + + def delete_resource(self, resource_id: str) -> None: + try: + resources = self.keycloak_admin.get_client_authz_resources( + os.environ["KEYCLOAK_CLIENT_ID"] + ) + for resource in resources: + if resource["uris"] == ["/resource/" + resource_id]: + self.keycloak_admin.delete_client_authz_resource( + 
os.environ["KEYCLOAK_CLIENT_ID"], resource["id"] + ) + except KeycloakDeleteError as e: + self.logger.error("Failed to delete resource from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to delete resource") + + def get_groups(self) -> list[dict]: + try: + groups = self.keycloak_admin.get_groups( + query={"briefRepresentation": False} + ) + result = [] + for group in groups: + group_id = group["id"] + group_name = group["name"] + roles = group.get("clientRoles", {}).get("keep", []) + + # Fetch members for each group + members = self.keycloak_admin.get_group_members(group_id) + member_names = [member.get("email", "") for member in members] + member_count = len(members) + + result.append( + Group( + id=group_id, + name=group_name, + roles=roles, + memberCount=member_count, + members=member_names, + ) + ) + return result + except KeycloakGetError as e: + self.logger.error("Failed to fetch groups from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to fetch groups") + + def create_user_policy(self, perm, permission: ResourcePermission) -> None: + # we need the user id from email: + # TODO: this is not efficient, we should cache this + users = self.keycloak_admin.get_users({}) + user = next( + (user for user in users if user.get("email") == perm.id), + None, + ) + if not user: + raise HTTPException(status_code=400, detail="User not found") + resp = self.keycloak_admin.connection.raw_post( + f"{self.admin_url}/authz/resource-server/policy/user", + data=json.dumps( + { + "name": f"Allow user {user.get('id')} to access resource type {permission.resource_type} with name {permission.resource_name}", + "description": json.dumps( + { + "user_id": user.get("id"), + "user_email": user.get("email"), + "resource_id": permission.resource_id, + } + ), + "logic": "POSITIVE", + "users": [user.get("id")], + } + ), + ) + try: + resp.raise_for_status() + # 409 is ok, it means the policy already exists + except Exception as e: + 
if resp.status_code != 409: + raise e + # just continue to next policy + else: + return None + policy_id = resp.json().get("id") + return policy_id + + def create_group_policy(self, perm, permission: ResourcePermission) -> None: + group_name = perm.id + group = self.keycloak_admin.get_groups(query={"search": perm.id}) + if not group or len(group) > 1: + self.logger.error("Problem with group - should be 1 but got %s", len(group)) + raise HTTPException(status_code=400, detail="Problem with group") + group = group[0] + group_id = group["id"] + resp = self.keycloak_admin.connection.raw_post( + f"{self.admin_url}/authz/resource-server/policy/group", + data=json.dumps( + { + "name": f"Allow group {perm.id} to access resource type {permission.resource_type} with name {permission.resource_name}", + "description": json.dumps( + { + "group_name": group_name, + "group_id": group_id, + "resource_id": permission.resource_id, + } + ), + "logic": "POSITIVE", + "groups": [{"id": group_id, "extendChildren": False}], + "groupsClaim": "", + } + ), + ) + try: + resp.raise_for_status() + # 409 is ok, it means the policy already exists + except Exception as e: + if resp.status_code != 409: + raise e + # just continue to next policy + else: + return None + policy_id = resp.json().get("id") + return policy_id + + def create_permissions(self, permissions: list[ResourcePermission]) -> None: + # create or update + try: + existing_permissions = self.keycloak_admin.get_client_authz_permissions( + self.client_id, + ) + existing_permission_names_to_permissions = { + permission["name"]: permission for permission in existing_permissions + } + for permission in permissions: + # 1. 
first, create the resource if its not already created + resp = self.keycloak_admin.create_client_authz_resource( + self.client_id, + { + "name": permission.resource_id, + "displayName": permission.resource_name, + "type": "urn:keep:resources:keep_" + permission.resource_type, + "scopes": [], + }, + skip_exists=True, + ) + # 2. create the policy if it doesn't exist: + policies = [] + for perm in permission.permissions: + try: + if perm.type == "user": + policy_id = self.create_user_policy(perm, permission) + if policy_id: + policies.append(policy_id) + else: + self.logger.info("Policy already exists in Keycloak") + else: + policy_id = self.create_group_policy(perm, permission) + if policy_id: + policies.append(policy_id) + else: + self.logger.info("Policy already exists in Keycloak") + + except KeycloakPostError as e: + if "already exists" in str(e): + self.logger.info("Policy already exists in Keycloak") + # its ok! + pass + else: + self.logger.error( + "Failed to create policy in Keycloak: %s", str(e) + ) + raise HTTPException( + status_code=500, detail="Failed to create policy" + ) + except Exception as e: + self.logger.error( + "Failed to create policy in Keycloak: %s", str(e) + ) + raise HTTPException( + status_code=500, detail="Failed to create policy" + ) + + # 3. 
Finally, create the resource + # 3.0 try to get the resource based permission + permission_name = f"Permission on resource type {permission.resource_type} with name {permission.resource_name}" + if existing_permission_names_to_permissions.get(permission_name): + # update the permission + existing_permissions = existing_permission_names_to_permissions[ + permission_name + ] + existing_permission_id = existing_permissions["id"] + # if no new policies, continue + if not policies: + existing_permissions["policies"] = [] + else: + # add the new policies + associated_policies = self.keycloak_admin.get_client_authz_permission_associated_policies( + self.client_id, existing_permission_id + ) + existing_permissions["policies"] = [ + policy["id"] for policy in associated_policies + ] + existing_permissions["policies"].extend(policies) + # update the policy to include the new policy + resp = self.keycloak_admin.connection.raw_put( + f"{self.admin_url}/authz/resource-server/permission/resource/{existing_permission_id}", + data=json.dumps(existing_permissions), + ) + resp.raise_for_status() + else: + # 3.2 else, create it + self.keycloak_admin.create_client_authz_resource_based_permission( + self.client_id, + { + "type": "resource", + "name": f"Permission on resource type {permission.resource_type} with name {permission.resource_name}", + "scopes": [], + "policies": policies, + "resources": [ + permission.resource_id, + ], + "decisionStrategy": "Affirmative".upper(), + }, + ) + except KeycloakPostError as e: + if "already exists" in str(e): + self.logger.info("Permission already exists in Keycloak") + raise HTTPException(status_code=409, detail="Permission already exists") + else: + self.logger.error( + "Failed to create permissions in Keycloak: %s", str(e) + ) + raise HTTPException( + status_code=500, detail="Failed to create permissions" + ) + except Exception as e: + self.logger.error("Failed to create permissions in Keycloak: %s", str(e)) + raise 
HTTPException(status_code=500, detail="Failed to create permissions") + + def get_permissions(self) -> list[ResourcePermission]: + try: + resources = self.keycloak_admin.get_client_authz_resources(self.client_id) + resources_to_policies = {} + permissions = self.keycloak_admin.get_client_authz_permissions( + self.client_id + ) + for permission in permissions: + # if its a scope permission, skip it + if permission["type"] == "scope": + continue + permission_id = permission["id"] + associated_policies = ( + self.keycloak_admin.get_client_authz_permission_associated_policies( + self.client_id, permission_id + ) + ) + for policy in associated_policies: + try: + details = json.loads(policy["description"]) + # with Keep convention, the description should be a json + except json.JSONDecodeError: + self.logger.warning( + "Failed to parse policy description: %s", + policy["description"], + ) + continue + resource_id = details["resource_id"] + if resource_id not in resources_to_policies: + resources_to_policies[resource_id] = [] + if policy.get("type") == "user": + user_email = details.get("user_email") + resources_to_policies[resource_id].append( + {"id": user_email, "type": "user"} + ) + else: + group_name = details.get("group_name") + resources_to_policies[resource_id].append( + {"id": group_name, "type": "group"} + ) + permissions_dto = [] + for resource in resources: + resource_id = resource["name"] + resource_name = resource["displayName"] + resource_type = resource["type"] + permissions_dto.append( + ResourcePermission( + resource_id=resource_id, + resource_name=resource_name, + resource_type=resource_type, + permissions=[ + PermissionEntity( + id=policy["id"], + name=policy.get("name", ""), + type=policy["type"], + ) + for policy in resources_to_policies.get(resource_id, []) + ], + ) + ) + return permissions_dto + except KeycloakGetError as e: + self.logger.error("Failed to fetch permissions from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, 
detail="Failed to fetch permissions") + except Exception as e: + self.logger.error("Failed to fetch permissions from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to fetch permissions") + + # TODO: this should use UMA and not evaluation since evaluation needs admin access + def get_user_permission_on_resource_type( + self, resource_type: str, authenticated_entity: AuthenticatedEntity + ) -> list[ResourcePermission]: + """ + Get permissions for a specific user on a specific resource type. + + Args: + resource_type (str): The type of resource for which to retrieve permissions. + user_id (str): The ID of the user for which to retrieve permissions. + + Returns: + list: A list of permission objects. + """ + # there is two ways to do this: + # 1. admin api + # 2. token endpoint directly + # we will use the admin api and put (2) on TODO + # https://keycloak.discourse.group/t/keyycloak-authz-policy-evaluation-using-rest-api/798/2 + # https://keycloak.discourse.group/t/how-can-i-evaluate-user-permission-over-rest-api/10619 + + # also, we should see how it scale with many resources + try: + user_id = self.keycloak_admin.get_user_id(authenticated_entity.email) + resource_type = f"urn:keep:resources:keep_{resource_type}" + resp = self.keycloak_admin.connection.raw_post( + f"{self.admin_url}/authz/resource-server/policy/evaluate", + data=json.dumps( + { + "userId": user_id, + "resources": [ + { + "type": resource_type, + } + ], + "context": {"attributes": {}}, + "clientId": self.client_id, + } + ), + ) + results = resp.json() + results = results.get("results", []) + allowed_resources_ids = [ + result["resource"]["name"] + for result in results + if result["status"] == "PERMIT" + ] + # there is some bug/limitation in keycloak where if the resource_type does not exist, it returns + # all other objects, so lets handle it by checking if the word "with" is one of the results name + if any("with" in result for result in allowed_resources_ids): + return 
[] + return allowed_resources_ids + except Exception as e: + self.logger.error( + "Failed to fetch user permissions from Keycloak: %s", str(e) + ) + raise HTTPException( + status_code=500, detail="Failed to fetch user permissions" + ) + + def get_policies(self) -> list[dict]: + try: + policies = self.keycloak_admin.connection.raw_get( + f"{self.admin_url}/authz/resource-server/policy" + ).json() + return policies + except KeycloakGetError as e: + self.logger.error("Failed to fetch policies from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to fetch policies") + + def get_roles(self) -> list[Role]: + """ + Get roles in the identity manager for authorization purposes. + + This method is used to retrieve the roles that have been defined + in the identity manager. It returns a list of role objects, each + containing the resource, scope, and user or group information. + + # TODO: Still to review if this is the correct way to fetch roles + """ + try: + roles = self.keycloak_admin.get_client_roles( + self.client_id, brief_representation=False + ) + # filter out the uma role + roles = [role for role in roles if role["name"] != "uma_protection"] + roles_dto = { + role.get("id"): Role( + id=role.get("id"), + name=role["name"], + description=role["description"], + scopes=set([]), # will populate this later + predefined=( + True + if role.get("attributes", {}).get("predefined", ["false"])[0] + == "true" + else False + ), + ) + for role in roles + } + # now for each role we need to get the scopes + policies = self.keycloak_admin.get_client_authz_policies(self.client_id) + roles_related_policies = [ + policy + for policy in policies + if policy.get("config", {}).get("roles", []) + ] + for policy in roles_related_policies: + role_id = json.loads(policy["config"]["roles"])[0].get("id") + policy_id = policy["id"] + # get dependent permissions + dependentPolicies = self.keycloak_admin.connection.raw_get( + 
f"{self.admin_url}/authz/resource-server/policy/{policy_id}/dependentPolicies", + ).json() + dependentPoliciesId = dependentPolicies[0].get("id") + scopes = self.keycloak_admin.connection.raw_get( + f"{self.admin_url}/authz/resource-server/policy/{dependentPoliciesId}/scopes", + ).json() + scope_names = [scope["name"] for scope in scopes] + # happens only when delete role fails from some resaon + if role_id not in roles_dto: + self.logger.warning("Role not found for policy, skipping") + continue + roles_dto[role_id].scopes.update(scope_names) + return list(roles_dto.values()) + except KeycloakGetError as e: + self.logger.error("Failed to fetch roles from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to fetch roles") + + def get_role_by_role_name(self, role_name: str) -> Role: + roles = self.get_roles() + role = next((role for role in roles if role.name == role_name), None) + if not role: + self.logger.error("Role not found") + raise HTTPException(status_code=404, detail="Role not found") + return role + + def delete_role(self, role_id: str) -> None: + try: + # delete the role + resp = self.keycloak_admin.connection.raw_delete( + f"{self.admin_url_without_client}/roles-by-id/{role_id}", + ) + resp.raise_for_status() + # delete the policy + policies = self.get_policies() + for policy in policies: + roles = json.loads(policy.get("config", {}).get("roles", "{}")) + if roles and roles[0].get("id") == role_id: + policy_id = policy.get("id") + break + + if not policy_id: + self.logger.warning("Policy not found for role deletion, skipping") + else: + self.logger.info("Deleteing policy id") + self.keycloak_admin.delete_client_authz_policy( + self.client_id, policy_id + ) + self.logger.info("Policy id deleted") + # permissions gets deleted impliclty when we delete the policy + except KeycloakDeleteError as e: + self.logger.error("Failed to delete role from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to delete 
role") + + def create_group( + self, group_name: str, members: list[str], roles: list[str] + ) -> None: + try: + # create it + group_id = self.keycloak_admin.create_group( + { + "name": group_name, + } + ) + # add members + for member in members: + user_id = self.get_user_id_by_email(member) + self.keycloak_admin.group_user_add(user_id=user_id, group_id=group_id) + # assign roles + for role in roles: + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role) + self.keycloak_admin.assign_group_client_roles( + client_id=self.client_id, + group_id=group_id, + roles=[{"id": role_id, "name": role}], + ) + except KeycloakPostError as e: + if "already exists" in str(e): + self.logger.info("Group already exists in Keycloak") + pass + else: + self.logger.error("Failed to create group in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create group") + + def update_group( + self, group_name: str, members: list[str], roles: list[str] + ) -> None: + try: + # get the group id + groups = self.keycloak_admin.get_groups(query={"search": group_name}) + if not groups: + self.logger.error("Group not found") + raise HTTPException(status_code=404, detail="Group not found") + group_id = groups[0]["id"] + # check what members needs to be added and which to be removed + existing_members = self.keycloak_admin.get_group_members(group_id) + existing_members = [member.get("email") for member in existing_members] + members_to_add = [ + member for member in members if member not in existing_members + ] + members_to_remove = [ + member for member in existing_members if member not in members + ] + # remove members + for member in members_to_remove: + user_id = self.get_user_id_by_email(member) + self.keycloak_admin.group_user_remove( + user_id=user_id, group_id=group_id + ) + + # add members + for member in members_to_add: + user_id = self.get_user_id_by_email(member) + self.keycloak_admin.group_user_add(user_id=user_id, group_id=group_id) + + # check 
what roles needs to be added and which to be removed + existing_roles = self.keycloak_admin.get_group_client_roles( + client_id=self.client_id, group_id=group_id + ) + existing_roles = [role["name"] for role in existing_roles] + roles_to_add = [role for role in roles if role not in existing_roles] + roles_to_remove = [role for role in existing_roles if role not in roles] + # remove roles + for role in roles_to_remove: + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role) + self.keycloak_admin.connection.raw_delete( + f"{self.admin_url_without_client}/groups/{group_id}/role-mappings/clients/{self.client_id}", + payload={ + "client": self.client_id, + "group": group_id, + "roles": [{"id": role_id, "name": role}], + }, + ) + # assign roles + for role in roles_to_add: + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role) + self.keycloak_admin.assign_group_client_roles( + client_id=self.client_id, + group_id=group_id, + roles=[{"id": role_id, "name": role}], + ) + except KeycloakPostError as e: + self.logger.error("Failed to update group in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to update group") + + def delete_group(self, group_name: str) -> None: + try: + groups = self.keycloak_admin.get_groups(query={"search": group_name}) + if not groups: + self.logger.error("Group not found") + raise HTTPException(status_code=404, detail="Group not found") + group_id = groups[0]["id"] + self.keycloak_admin.delete_group(group_id) + except KeycloakDeleteError as e: + self.logger.error("Failed to delete group from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to delete group") diff --git a/elk/README.md b/elk/README.md new file mode 100644 index 0000000000..395dba0e3e --- /dev/null +++ b/elk/README.md @@ -0,0 +1,102 @@ +# ELK-stack integration + +This directory contains the configuration files and Docker services needed to run Keep with a filebeat container. 
Useful if you want to test integration of Keep backend logs with Logstash and Kibana. + +## Directory Structure + +``` +elk/ +├── docker-compose-elk.yml # Docker Compose configuration for elk integration +├── filebeat.yml # Filebeat configuration file +├── logstash.conf # Logstash configuration example to save keep-backend logs +└── README.md # This file +``` + +## Components + +The setup consists of several services: + +- **Filebeat**: Filebeat container to push keep-backend logs to logstash +- **Keep Frontend**: The Keep UI service configured to use the proxy +- **Keep Backend**: The Keep API service +- **Keep WebSocket**: The WebSocket server for real-time updates + +## Configuration + +### Environment Variables + +```env +LOGSTASH_HOST=logstash-host +LOGSTASH_PORT=5044 +``` + +### Usage + +1. Start the elk environment: + +```bash +docker compose -f docker-compose-elk.yml up +``` + +2. To run in detached mode: + +```bash +docker compose -f docker-compose-elk.yml up -d +``` + +3. To stop all services: + +```bash +docker compose -f docker-compose-elk.yml down +``` + +### Accessing Services + +- Keep Backend: http://localhost:8080 +- Kibana: http://localhost:5601 + +### Kibana configuration + +- Go to http://localhost:5601/app/discover +- Click "Create Data view" +- Add any name you want +- Add index pattern to `keep-backend-logs-*` +- Save data view and inspect logs + + +## Custom Configuration + +### Modifying Filebeat Settings + +To modify the Filebeat configuration: + +1. Edit `filebeat.yml` +2. Restart the filebeat service: + +```bash +docker compose -f docker-compose-elk.yml restart filebeat +``` + +### Modifying Logstash Settings + +To modify the Logstash configuration: + +1. Edit `logstash.conf` +2. 
Restart the logstash service: + +```bash +docker compose -f docker-compose-elk.yml restart logstash +``` + +## Security Considerations + +- This setup is intended for development environments only +- SSL is disabled for all services for simplification + +## Contributing + +When modifying the elk setup: + +1. Document any changes to configuration files +2. Test the setup of elk environments +3. Update this README if adding new features or configurations diff --git a/elk/docker-compose-elk.yml b/elk/docker-compose-elk.yml new file mode 100644 index 0000000000..6c4b256c27 --- /dev/null +++ b/elk/docker-compose-elk.yml @@ -0,0 +1,91 @@ +services: + keep-backend-elk: + extends: + file: ../docker-compose.common.yml + service: keep-backend-common + image: us-central1-docker.pkg.dev/keephq/keep/keep-api + environment: + - AUTH_TYPE=NO_AUTH + volumes: + - ./state:/state + + keep-websocket-server: + extends: + file: ../docker-compose.common.yml + service: keep-websocket-server-common + + elastic: + image: docker.elastic.co/elasticsearch/elasticsearch:8.17.0 + labels: + co.elastic.logs/module: elasticsearch + volumes: + - elastic_data:/usr/share/elasticsearch/data + ports: + - "9200:9200" + environment: + - node.name=elastic + - cluster.name=keep-elk + - discovery.type=single-node + - ELASTIC_PASSWORD=elastic + - bootstrap.memory_lock=true + - xpack.security.enabled=false + - xpack.security.enrollment.enabled=false + - xpack.security.transport.ssl.enabled=false + - xpack.license.self_generated.type=basic + + kibana: + depends_on: + - elastic + image: docker.elastic.co/kibana/kibana:8.17.0 + labels: + co.elastic.logs/module: kibana + volumes: + - kibana_data:/usr/share/kibana/data + ports: + - 5601:5601 + environment: + - SERVERNAME=kibana + - ELASTICSEARCH_HOSTS=http://elastic:9200 + - ELASTICSEARCH_USERNAME=kibana_system + - ELASTICSEARCH_PASSWORD=kibana + - XPACK_APM_SERVICEMAPENABLED="true" + - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${ENCRYPTION_KEY} + + filebeat: + 
image: docker.elastic.co/beats/filebeat:8.17.0 + container_name: filebeat + user: root + volumes: + - /var/lib/docker/containers:/var/lib/docker/containers:ro + - /var/run/docker.sock:/var/run/docker.sock:ro + - ./filebeat.yml:/usr/share/filebeat/filebeat.yml:ro + environment: + - LOGSTASH_HOST=logstash01 + command: [ "--strict.perms=false" ] # Disable strict permissions to avoid permission errors + + logstash: + depends_on: + - elastic + - kibana + image: docker.elastic.co/logstash/logstash:8.17.0 + labels: + co.elastic.logs/module: logstash + user: root + ports: + - "5001:5000" + - "5044:5044" + - "9600:9600" + volumes: + - logstash_data:/usr/share/logstash/data + - "./logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro" + environment: + - xpack.monitoring.enabled=false + - ELASTIC_USER=elastic + - ELASTIC_PASSWORD=elastic + - ELASTIC_HOSTS=http://elastic:9200 + + +volumes: + elastic_data: + kibana_data: + logstash_data: diff --git a/elk/filebeat.yml b/elk/filebeat.yml new file mode 100644 index 0000000000..205c084324 --- /dev/null +++ b/elk/filebeat.yml @@ -0,0 +1,22 @@ +filebeat.inputs: + - type: container + paths: + - /var/lib/docker/containers/*/*.log + stream: stdout # Only capture stdout + json.keys_under_root: true # Parse JSON-formatted logs automatically + json.add_error_key: true # Add error field if JSON parsing fails + processors: + - decode_json_fields: + fields: [ "message" ] # Try to decode the `message` field as JSON + target: "" # Merge decoded fields at the root level + overwrite_keys: true # Overwrite existing keys if present + - add_docker_metadata: # Enrich logs with Docker metadata + host: "unix:///var/run/docker.sock" + - drop_event: + when.not.contains.container.labels: + com_docker_compose_service: "keep-backend-elk" + +output.logstash: + hosts: ["logstash:5044"] # Replace with your Logstash host and port + +logging.level: info # Set Filebeat logging level diff --git a/elk/logstash.conf b/elk/logstash.conf new file mode 100644 
index 0000000000..6f5aeb239f --- /dev/null +++ b/elk/logstash.conf @@ -0,0 +1,19 @@ +input { + beats { + port => 5044 # Match the port used in Filebeat configuration + } +} + +filter { + json { + source => "message" + } +} + +output { + stdout { codec => rubydebug } # For debugging + elasticsearch { + hosts => ["http://elastic:9200"] + index => "keep-backend-logs-%{+YYYY.MM.dd}" + } +} diff --git a/examples/providers/airflow-prod.yaml b/examples/providers/airflow-prod.yaml new file mode 100644 index 0000000000..c3bb9aef47 --- /dev/null +++ b/examples/providers/airflow-prod.yaml @@ -0,0 +1,11 @@ +name: airflow-prod +type: airflow +deduplication_rules: + airflow-prod-default: + description: "Default deduplication rule for Airflow Production" + fingerprint_fields: + - fingerprint + full_deduplication: true + ignore_fields: + - name + - lastReceived diff --git a/examples/providers/telegram-bot.yaml b/examples/providers/telegram-bot.yaml new file mode 100644 index 0000000000..02c48e495e --- /dev/null +++ b/examples/providers/telegram-bot.yaml @@ -0,0 +1,5 @@ +name: telegram-bot +type: telegram +authentication: + # Use environment variables to store sensitive information + bot_token: "$(TELEGRAM_BOT_TOKEN)" diff --git a/examples/workflows/aks_basic.yml b/examples/workflows/aks_basic.yml index aeeb7f1403..ebb27f5570 100644 --- a/examples/workflows/aks_basic.yml +++ b/examples/workflows/aks_basic.yml @@ -1,6 +1,7 @@ workflow: - id: aks-example - description: aks-example + id: aks-pod-status-monitor + name: AKS Pod Status Monitor + description: Retrieves and displays status information for all pods in an AKS cluster, including pod names, namespaces, and current phase. 
triggers: - type: manual steps: @@ -17,4 +18,4 @@ workflow: provider: type: console with: - alert_message: "Pod name: {{ foreach.value.metadata.name }} || Namespace: {{ foreach.value.metadata.namespace }} || Status: {{ foreach.value.status.phase }}" + message: "Pod name: {{ foreach.value.metadata.name }} || Namespace: {{ foreach.value.metadata.namespace }} || Status: {{ foreach.value.status.phase }}" diff --git a/examples/workflows/autosupress.yml b/examples/workflows/autosupress.yml index 3e6e85cf47..20c3d6a658 100644 --- a/examples/workflows/autosupress.yml +++ b/examples/workflows/autosupress.yml @@ -1,7 +1,8 @@ workflow: - id: autosupress + id: automatic-alert-suppression + name: Automatic Alert Suppression strategy: parallel - description: demonstrates how to automatically suppress alerts + description: Automatically suppresses incoming alerts by marking them as dismissed, useful for handling known or expected alert conditions. triggers: - type: alert actions: diff --git a/examples/workflows/bash_example.yml b/examples/workflows/bash_example.yml index 4b5518ef93..9bec0593e9 100644 --- a/examples/workflows/bash_example.yml +++ b/examples/workflows/bash_example.yml @@ -1,29 +1,30 @@ workflow: - id: Resend-Python-service - description: Python Resend Mail + id: python-service-monitor + name: Python Service Monitor + description: Monitors a Python service by executing a test script and sends email notifications via Resend when the service is operational. 
triggers: - - type: manual + - type: manual owners: [] services: [] steps: - - name: run-script - provider: - config: '{{ providers.default-bash }}' - type: bash - with: - command: python3 test.py - timeout: 5 + - name: run-script + provider: + config: "{{ providers.default-bash }}" + type: bash + with: + command: python3 test.py + timeout: 5 actions: - - condition: - - assert: '{{ steps.run-script.results.return_code }} == 0' - name: assert-condition - type: assert - name: trigger-resend - provider: - type: resend - config: "{{ providers.resend-test }}" - with: - _from: "onboarding@resend.dev" - to: "youremail.dev@gmail.com" - subject: "Python test is up!" - html:

Python test is up!

+ - condition: + - assert: "{{ steps.run-script.results.return_code }} == 0" + name: assert-condition + type: assert + name: trigger-resend + provider: + type: resend + config: "{{ providers.resend-test }}" + with: + _from: "onboarding@resend.dev" + to: "youremail.dev@gmail.com" + subject: "Python test is up!" + html:

Python test is up!

diff --git a/examples/workflows/bigquery.yml b/examples/workflows/bigquery.yml index 10ea244c83..8ddf0372d1 100644 --- a/examples/workflows/bigquery.yml +++ b/examples/workflows/bigquery.yml @@ -1,6 +1,9 @@ -alert: - id: bq-sql-query - description: Monitor that time difference is no more than 1 hour +workflow: + id: bigquery-data-freshness-monitor + name: BigQuery Data Freshness Monitor + description: Monitors data freshness in BigQuery tables by checking time differences and querying public datasets for validation. + triggers: + - type: manual steps: - name: get-max-datetime provider: @@ -12,63 +15,7 @@ alert: - name: runbook-step1-bigquery-sql provider: type: bigquery - config: "{{ providers.bigquery-prod }}" + config: "{{ providers.bigquery }}" with: # Get max(datetime) from the random table query: "SELECT * FROM `bigquery-public-data.austin_bikeshare.bikeshare_stations` LIMIT 10" - actions: - - name: opsgenie-alert - condition: - - name: threshold-condition - type: threshold - # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours - # utcnow() returns the local machine datetime in UTC - # to_utc() converts a datetime to UTC - value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.get-max-datetime.results[0][date] }}")) - compare_to: 1 # hours - compare_type: gt # greater than - # Give it an alias so we can use it in the slack action - alias: A - provider: - type: opsgenie - config: " {{ providers.opsgenie-prod }} " - with: - message: "DB datetime value ({{ actions.opsgenie-alert.conditions.threshold-condition.0.compare_value }}) is greater than 1! 🚨" - - name: trigger-slack - if: "{{ A }}" - provider: - type: slack - config: " {{ providers.slack-prod }} " - with: - message: "DB datetime value ({{ actions.opsgenie-alert.conditions.threshold-condition.0.compare_value }}) is greater than 1! 
🚨" - - name: trigger-slack-2 - if: "{{ A }}" - provider: - type: slack - config: " {{ providers.slack-prod }} " - with: - blocks: - - type: header - text: - type: plain_text - text: "Adding some context to the alert:" - emoji: true - - type: section - text: - type: mrkdwn - text: |- - {{#steps.runbook-step1-bigquery-sql.results}} - - Station id: {{station_id}} | Status: {{status}} - {{/steps.runbook-step1-bigquery-sql.results}} - - -providers: - bigquery-prod: - description: BigQuery Prod - authentication: - opsgenie-prod: - authentication: - api_key: "{{ env.OPSGENIE_API_KEY }}" - slack-prod: - authentication: - webhook_url: "{{ env.SLACKDEMO_WEBHOOK }}" diff --git a/examples/workflows/blogpost.yml b/examples/workflows/blogpost.yml index 367981bab6..a863d7aae4 100644 --- a/examples/workflows/blogpost.yml +++ b/examples/workflows/blogpost.yml @@ -1,6 +1,7 @@ workflow: - id: blogpost-workflow - description: Enrich the alerts and open ticket + id: critical-alert-enrichment + name: Critical Alert Enrichment + description: Enriches critical alerts with customer information from MySQL and creates ServiceNow incident tickets with detailed context. 
triggers: # filter on critical alerts - type: alert @@ -17,13 +18,13 @@ workflow: query: "select * from blogpostdb.customer where customer_id = '{{ alert.customer_id }}'" single_row: true as_dict: true - enrich_alert: - - key: customer_name - value: results.name - - key: customer_email - value: results.email - - key: customer_tier - value: results.tier + enrich_alert: + - key: customer_name + value: results.name + - key: customer_email + value: results.email + - key: customer_tier + value: results.tier actions: # Create service now incident ticket - name: create-service-now-ticket diff --git a/examples/workflows/businesshours.yml b/examples/workflows/businesshours.yml new file mode 100644 index 0000000000..bc813bbb81 --- /dev/null +++ b/examples/workflows/businesshours.yml @@ -0,0 +1,16 @@ +workflow: + id: business-hours-alert-handler + name: Business Hours Alert Handler + description: Processes alerts only during specified business hours in the America/New York timezone, preventing off-hours notifications. + triggers: + - type: alert + - type: manual + actions: + - name: dismiss-alert + if: "keep.is_business_hours(timezone='America/New_York')" + provider: + type: mock + with: + enrich_alert: + - key: buisnesshours + value: "true" diff --git a/examples/workflows/change.yml b/examples/workflows/change.yml index 605b010beb..0228ce4bed 100644 --- a/examples/workflows/change.yml +++ b/examples/workflows/change.yml @@ -1,6 +1,7 @@ workflow: - id: on-field-change - description: demonstrates how to trigger a workflow when a field changes + id: alert-status-change-monitor + name: Alert Status Change Monitor + description: Triggers workflow actions specifically when an alert's status field changes, useful for status-based notifications. 
triggers: - type: alert only_on_change: @@ -10,4 +11,4 @@ workflow: provider: type: console with: - alert_message: "Hello world" + message: "Hello world" diff --git a/examples/workflows/clickhouse_multiquery.yml b/examples/workflows/clickhouse_multiquery.yml new file mode 100644 index 0000000000..a3e87ef935 --- /dev/null +++ b/examples/workflows/clickhouse_multiquery.yml @@ -0,0 +1,61 @@ +workflow: + id: clickhouse-multi-query-monitor + name: ClickHouse Multi-Query Monitor + description: Executes multiple ClickHouse queries to monitor system health and creates ServiceNow tickets when issues are detected. + triggers: + - type: manual + + steps: + - name: clickhouse-observability-urls + provider: + config: "{{ providers.clickhouse }}" + type: clickhouse + with: + query: | + SELECT Url, Status FROM "observability"."Urls" + WHERE ( Url LIKE '%te_tests%' ) AND Timestamp >= toStartOfMinute(date_add(toDateTime(NOW()), INTERVAL -1 MINUTE)) AND Status = 0; + + - name: clickhouse-observability-events + provider: + config: "{{ providers.clickhouse }}" + type: clickhouse + with: + query: | + SELECT arrayElement(Metrics.testName, 1) AS mytest FROM observability.Events + WHERE (Sources = 'ThousandEyes') AND (Timestamp >= toStartOfMinute(toDateTime(NOW()) + toIntervalMinute(-1))) AND (mytest = 'Oceanspot-TE') + + - name: clickhouse-observability-traces + provider: + config: "{{ providers.clickhouse }}" + type: clickhouse + with: + query: | + SELECT count(*) as c FROM "observability"."Traces" + WHERE ( SpanName LIKE '%te_tests%' ) AND Timestamp >= toStartOfMinute(date_add(toDateTime(NOW()), INTERVAL -1 MINUTE)); + + - name: clickhouse-observability-follow-up-query + # if any of the previous queries return results, run this query + if: keep.len( {{ steps.clickhouse-observability-urls.results }} ) or keep.len( {{ steps.clickhouse-observability-events.results }} ) or keep.len( {{ steps.clickhouse-observability-traces.results }} ) + provider: + config: "{{ providers.clickhouse }}" + 
type: clickhouse + with: + query: | + SELECT Url, Status FROM "observability"."Urls" + WHERE ( Url LIKE '%te_tests%' ) AND Timestamp >= toStartOfMinute(date_add(toDateTime(NOW()), INTERVAL -1 MINUTE)) AND Status = 0; + + actions: + - name: snow-action + # if any of the previous queries return results, run this query + if: keep.len( {{ steps.clickhouse-observability-urls.results }} ) or keep.len( {{ steps.clickhouse-observability-events.results }} ) or keep.len( {{ steps.clickhouse-observability-traces.results }} ) + provider: + type: servicenow + config: "{{ providers.servicenow }}" + with: + table_name: "yourtablename" + payload: + short_description: "Results returned for clickhouse-observability" + description: | + Urls: {{ steps.clickhouse-observability-urls.results }} + Events: {{ steps.clickhouse-observability-events.results }} + Traces: {{ steps.clickhouse-observability-traces.results }} diff --git a/examples/workflows/complex-conditions-cel.yml b/examples/workflows/complex-conditions-cel.yml new file mode 100644 index 0000000000..5daef5cae1 --- /dev/null +++ b/examples/workflows/complex-conditions-cel.yml @@ -0,0 +1,13 @@ +workflow: + id: complex-conditions-monitor-cel + name: Complex Conditions Monitor (CEL) + description: Monitors alerts with complex conditions using CEL filters. 
+ triggers: + - type: alert + cel: (source.contains("datadog") && severity == "critical") || (source.contains("newrelic") && severity == "error") + actions: + - name: notify + provider: + type: console + with: + message: "Critical Datadog or error NewRelic alert: {{ alert.name }}" diff --git a/examples/workflows/conditionally_run_if_ai_says_so.yaml b/examples/workflows/conditionally_run_if_ai_says_so.yaml new file mode 100644 index 0000000000..3c08f32a7e --- /dev/null +++ b/examples/workflows/conditionally_run_if_ai_says_so.yaml @@ -0,0 +1,38 @@ +workflow: + id: ai-guided-mysql-cleanup + name: AI-Guided MySQL Cleanup + description: Uses OpenAI to intelligently determine whether to run MySQL table cleanup operations based on alert context. + triggers: + - type: incident + events: + - updated + - created + steps: + - name: ask-openai-if-this-workflow-is-applicable + provider: + config: "{{ providers.my_openai }}" + type: openai + with: + prompt: "There is a task cleaning MySQL database. Should we run the task if we received an alert with such a name {{ alert.name }}?" 
+ model: "gpt-4o-mini" # This model supports structured output + structured_output_format: # We limit what model could return + type: json_schema + json_schema: + name: workflow_applicability + schema: + type: object + properties: + should_run: + type: boolean + description: "Whether the workflow should be executed based on the alert" + required: ["should_run"] + additionalProperties: false + strict: true + actions: + - name: clean-db-step + if: "{{ steps.ask-openai-if-this-workflow-is-applicable.results.response.should_run }}" + provider: + config: "{{ providers.mysql }}" + type: mysql + with: + query: DELETE FROM bookstore.cache ORDER BY id DESC LIMIT 100; diff --git a/examples/workflows/console_example.yml b/examples/workflows/console_example.yml new file mode 100644 index 0000000000..a8d25ff5c7 --- /dev/null +++ b/examples/workflows/console_example.yml @@ -0,0 +1,13 @@ +workflow: + id: console-logger + name: Console Logger + description: Simple workflow demonstrating console logging functionality with customizable messages. + triggers: + - type: manual + actions: + - name: echo + provider: + type: console + with: + logger: true + message: "Hey" diff --git a/examples/workflows/consts_and_dict.yml b/examples/workflows/consts_and_dict.yml new file mode 100644 index 0000000000..e72ea6b5df --- /dev/null +++ b/examples/workflows/consts_and_dict.yml @@ -0,0 +1,33 @@ +workflow: + id: consts-severity-queries-mapping + name: Severity and Queries Mapping Example + description: Demonstrates how to use constant mappings to standardize alert severity levels and queries. 
+ triggers: + - type: manual + consts: + ts: 1748465504 + queries: + get-all-tables: + query: "SELECT table_name FROM information_schema.tables;" + user-query: + query: "select * from user where user.id == %user_id%;" + severities: + s1: critical + s2: error + s3: warning + s4: info + critical: critical + error: error + steps: + - name: print-user-query + provider: + type: console + with: + message: keep.replace('{{consts.queries.user-query.query}}', '%user_id%', '999') # will print "select * from user where user.id == 999;" + actions: + - name: echo + provider: + type: console + with: + logger: true + message: keep.dictget({{ consts.severities }}, '{{ alert.severity }}', 'info') diff --git a/examples/workflows/consts_and_vars.yml b/examples/workflows/consts_and_vars.yml new file mode 100644 index 0000000000..79a936fc55 --- /dev/null +++ b/examples/workflows/consts_and_vars.yml @@ -0,0 +1,147 @@ +workflow: + id: tiered-alert-notification-system + name: Tiered Alert Notification System + description: Implements a sophisticated multi-tier alert notification system with escalating notifications to email and Slack based on alert duration. + triggers: + - type: alert + filters: + - key: source + value: "openobserve" + + # consts block for email_template and slack_message + consts: + email_template: | + Hi,
+ This {{ vars.alert_tier }} is triggered because the pipelines for {{ alert.host }} are down for more than keep.get_firing_time('{{ alert }}', 'minutes') minutes.
+ Please visit monitoring.keeohq.dev for more!
+ Regards,
+ KeepHQ dev Monitoring
+ + slack_message: | + {{ vars.alert_tier }} Alert: SA Pipelines are down + + Hi, + This {{ vars.alert_tier }} alert is triggered because the pipelines for {{ alert.host }} are down for more than keep.get_firing_time('{{ alert }}', 'minutes') minutes. + Please visit monitoring.keeohq.dev for more! + + actions: + # Sendgrid Tier 0 Alert + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 0 and keep.get_firing_time('{{ alert }}', 'minutes') < 10" + name: Sendgrid_Tier_0_alert + vars: + alert_tier: "Alert 0" + provider: + config: "{{ providers.Sendgrid }}" + type: sendgrid + with: + to: + - "shahar@keephq.dev" + subject: '"Tier 0 Alert: SA Pipelines are down"' + html: "{{ consts.email_template }}" + + # Sendgrid Tier 1 Alert + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 10 and keep.get_firing_time('{{ alert }}', 'minutes') < 15" + name: Sendgrid_Tier_1_alert + vars: + alert_tier: "Alert 1" + provider: + config: "{{ providers.Sendgrid }}" + type: sendgrid + with: + to: + - "shahar@keephq.dev" + subject: '"Tier 1 Alert: SA Pipelines are down"' + html: "{{ consts.email_template }}" + + # Sendgrid Tier 2 Alert + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 60 and keep.get_firing_time('{{ alert }}', 'minutes') < 70" + name: Sendgrid_Tier_2_alert + vars: + alert_tier: "Alert 2" + provider: + config: "{{ providers.Sendgrid }}" + type: sendgrid + with: + to: + - "shahar@keephq.dev" + subject: '"Tier 2 Alert: SA Pipelines are down"' + html: "{{ consts.email_template }}" + + # Sendgrid Tier 3 Alert + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 120 and keep.get_firing_time('{{ alert }}', 'minutes') < 130" + name: Sendgrid_Tier_3_alert + vars: + alert_tier: "Alert 3" + provider: + config: "{{ providers.Sendgrid }}" + type: sendgrid + with: + to: + - "shahar@keephq.dev" + subject: '"Tier 3 Alert: SA Pipelines are down"' + html: "{{ consts.email_template }}" + + # Sendgrid Tier 4 Alert + - if: "keep.get_firing_time('{{ alert }}', 
'minutes') >= 1440 and keep.get_firing_time('{{ alert }}', 'minutes') < 1450" + name: Sendgrid_Tier_4_alert + vars: + alert_tier: "Alert 4" + provider: + config: "{{ providers.Sendgrid }}" + type: sendgrid + with: + to: + - "shahar@keephq.dev" + subject: '"Tier 4 Alert: SA Pipelines are down"' + html: "{{ consts.email_template }}" + + # Slack Alerts + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 0 and keep.get_firing_time('{{ alert }}', 'minutes') < 10" + name: Slack_Tier_0_alert + vars: + alert_tier: "Alert 0" + provider: + config: "{{ providers.dev_slack }}" + type: slack + with: + message: "{{ consts.slack_message }}" + + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 10 and keep.get_firing_time('{{ alert }}', 'minutes') < 15" + name: Slack_Tier_1_alert + vars: + alert_tier: "Alert 1" + provider: + config: "{{ providers.dev_slack }}" + type: slack + with: + message: "{{ consts.slack_message }}" + + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 60 and keep.get_firing_time('{{ alert }}', 'minutes') < 70" + name: Slack_Tier_2_alert + vars: + alert_tier: "Alert 2" + provider: + config: "{{ providers.dev_slack }}" + type: slack + with: + message: "{{ consts.slack_message }}" + + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 120 and keep.get_firing_time('{{ alert }}', 'minutes') < 130" + name: Slack_Tier_3_alert + vars: + alert_tier: "Alert 3" + provider: + config: "{{ providers.dev_slack }}" + type: slack + with: + message: "{{ consts.slack_message }}" + + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 1440 and keep.get_firing_time('{{ alert }}', 'minutes') < 1450" + name: Slack_Tier_4_alert + vars: + alert_tier: "Alert 4" + provider: + config: "{{ providers.dev_slack }}" + type: slack + with: + message: "{{ consts.slack_message }}" diff --git a/examples/workflows/create-issue-youtrack.yaml b/examples/workflows/create-issue-youtrack.yaml new file mode 100644 index 0000000000..82b43375bb --- /dev/null +++ 
b/examples/workflows/create-issue-youtrack.yaml @@ -0,0 +1,19 @@ +workflow: + id: youtrack-issue-creator + name: YouTrack Issue Creator + description: Creates standardized issues in YouTrack with predefined templates and fields. + disabled: false + triggers: + - type: manual + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: youtrack-action + provider: + type: youtrack + config: "{{ providers.YouTrack }}" + with: + description: Users face random logout issues when logged in through Google OAuth + summary: Login fails with session error diff --git a/examples/workflows/create-new-incident-grafana-incident.yaml b/examples/workflows/create-new-incident-grafana-incident.yaml new file mode 100644 index 0000000000..d185b022df --- /dev/null +++ b/examples/workflows/create-new-incident-grafana-incident.yaml @@ -0,0 +1,23 @@ +workflow: + id: grafana-incident-creator + name: Grafana Incident Creator + description: Creates and manages incidents in Grafana Incident with customizable severity and status. 
+ disabled: false + triggers: + - type: manual + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: grafana_incident-action + provider: + type: grafana_incident + config: "{{ providers.incide }}" + with: + # Checkout https://docs.keephq.dev/providers/documentation/grafana_incident-provider for other available fields + operationType: create + title: Creating new incident from Keep + severity: critical + status: active + attachURL: https://keephq.dev diff --git a/examples/workflows/create-task-in-asana.yaml b/examples/workflows/create-task-in-asana.yaml new file mode 100644 index 0000000000..dde97fb8f8 --- /dev/null +++ b/examples/workflows/create-task-in-asana.yaml @@ -0,0 +1,22 @@ +workflow: + id: create-task-in-asana + name: Create task in asana + description: asana + disabled: false + triggers: + - type: manual + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: asana-action + provider: + type: asana + config: "{{ providers.asana }}" + with: + name: This is a test task from Keep + projects: + - "1209746642330536" + assignee: "1209746640089515" + due_at: "2025-09-15 02:06:58.147000+00:00" diff --git a/examples/workflows/create_alert_from_vm_metric.yml b/examples/workflows/create_alert_from_vm_metric.yml new file mode 100644 index 0000000000..0be0a95fe1 --- /dev/null +++ b/examples/workflows/create_alert_from_vm_metric.yml @@ -0,0 +1,45 @@ +# This workflow queries VictoriaMetrics metrics and creates alerts based on CPU usage +workflow: + # Unique identifier for this workflow + id: victoriametrics-cpu-alert + # Display name shown in the UI + name: VictoriaMetrics CPU Alert + # Brief description of what this workflow does + description: Monitors CPU usage metrics from VictoriaMetrics and generates alerts based on configurable thresholds. 
+ + # Define how the workflow is triggered + triggers: + - type: manual # Can be triggered manually from the UI + + # Steps to execute in order + steps: + - name: victoriametrics-step + provider: + # Use VictoriaMetrics provider config defined in providers.vm + config: "{{ providers.vm }}" + type: victoriametrics + with: + # Query average CPU usage rate + query: avg(rate(process_cpu_seconds_total)) + queryType: query + + # Actions to take based on the query results + actions: + - name: create-alert + provider: + type: keep + with: + # Create alert if CPU usage exceeds threshold + if: "{{ value.1 }} > 0.0040" + alert: + name: "High CPU Usage" + description: "[Single] CPU usage is high on the VM (created from VM metric)" + # Set severity based on CPU usage thresholds + severity: '{{ value.1 }} > 0.9 ? "critical" : {{ value.1 }} > 0.7 ? "warning" : "info"' + # Alert labels for filtering and routing + labels: + environment: production + app: myapp + service: api + team: devops + owner: alice diff --git a/examples/workflows/create_alert_in_keep.yml b/examples/workflows/create_alert_in_keep.yml new file mode 100644 index 0000000000..ef262bb193 --- /dev/null +++ b/examples/workflows/create_alert_in_keep.yml @@ -0,0 +1,17 @@ +workflow: + id: keep-alert-generator + name: Keep Alert Generator + description: Creates new alerts within the Keep system with customizable parameters and descriptions. + triggers: + - type: manual + + actions: + - name: create-alert + provider: + type: keep + with: + alert: + name: "Alert created from the workflow" + description: "This alert was created from the create_alert_in_keep.yml example workflow." 
+ labels: + environment: production diff --git a/examples/workflows/create_alerts_from_elastic.yml b/examples/workflows/create_alerts_from_elastic.yml new file mode 100644 index 0000000000..74df228842 --- /dev/null +++ b/examples/workflows/create_alerts_from_elastic.yml @@ -0,0 +1,33 @@ +workflow: + id: elastic-basic + name: Create alerts from Elasticsearch + description: Create alerts from Elastic index (e.g. info alerts) + triggers: + - type: manual + steps: + - name: query-ack-index + provider: + type: elastic + config: " {{ providers.elastic }} " + with: + index: keep-alerts-keep + query: | + { + "query_string": { + "query": "firing" + } + } + actions: + - name: create-alert + provider: + type: keep + with: + override_source_with: "elastic" + read_only: true + fingerprint_fields: + - id + alert: + name: "{{ _source.name }}" + status: "{{ _source.status }}" + host: "{{ _source.host }}" + service: "{{ _source.service }}" diff --git a/examples/workflows/create_alerts_from_mysql.yml b/examples/workflows/create_alerts_from_mysql.yml new file mode 100644 index 0000000000..845eeed2f1 --- /dev/null +++ b/examples/workflows/create_alerts_from_mysql.yml @@ -0,0 +1,41 @@ +workflow: + id: mysql-alert-sync + name: MySQL Alert Sync + description: Synchronizes alerts from a MySQL database into Keep, with configurable intervals and data mapping. 
+ triggers: + # run manually (debugging) + - type: manual + # run 5 minutes + - type: interval + value: 300 + steps: + # get the customer details + - name: get-alerts-from-mysql + provider: + type: mysql + config: " {{ providers.mysql-prod }} " + with: + # run the query, and limit the results to the last run + query: "select * from monitoring_system.alerts where ts > '{{ last_workflow_run_time }}'" + as_dict: true + # create the alerts using Keep provider + actions: + # Create an alert in Keep based on the query results + - name: create-alert + provider: + type: keep + with: + # by default, the alert will be created in the "keep" source, this can be adjusted + override_source_with: "mysql" + # do not try to resolve alerts or smth like that - just sync from the database + read_only: true + # adjust if needed + fingerprint_fields: + - id + # build the alert payload from the query results + alert: + name: "{{ message }}" + status: "{{ state }}" + host: "{{ host }}" + service: "{{ service }}" + client: "{{ client }}" diff --git a/examples/workflows/create_jira_ticket_upon_alerts.yml b/examples/workflows/create_jira_ticket_upon_alerts.yml index 5a5f9d1a97..ed5b8aa968 100644 --- a/examples/workflows/create_jira_ticket_upon_alerts.yml +++ b/examples/workflows/create_jira_ticket_upon_alerts.yml @@ -1,6 +1,7 @@ workflow: - id: sentry-alerts - description: handle alerts + id: sentry-to-jira-bridge + name: Sentry-to-Jira Bridge + description: Creates Jira tickets for critical Sentry alerts and notifies relevant teams via Slack. 
triggers: - type: alert # we want to run this workflow only for Sentry alerts with high severity @@ -20,8 +21,8 @@ workflow: config: " {{ providers.team-payments-slack }} " with: message: | - "A new alert from Sentry: Alert: {{ alert.name }} - {{ alert.description }} - {{ alert}}" + "A new alert from Sentry: Alert: {{ alert.name }} - {{ alert.description }} + {{ alert}}" - name: create-jira-ticket-oncall-board if: "'{{ alert.service }}' == 'ftp' and not '{{ alert.ticket_id }}'" provider: @@ -29,6 +30,8 @@ workflow: config: " {{ providers.jira }} " with: board_name: "Oncall Board" + custom_fields: + customfield_10201: "Critical" issuetype: "Task" summary: "{{ alert.name }} - {{ alert.description }} (created by Keep)" description: | diff --git a/examples/workflows/create_multi_alert_from_vm_metric.yml b/examples/workflows/create_multi_alert_from_vm_metric.yml new file mode 100644 index 0000000000..45086da08e --- /dev/null +++ b/examples/workflows/create_multi_alert_from_vm_metric.yml @@ -0,0 +1,58 @@ +workflow: + # Unique identifier for this workflow + id: multi-service-cpu-monitor + # Display name shown in the UI + name: Multi-Service CPU Monitor + # Brief description of what this workflow does + description: Creates separate alerts for different services based on VictoriaMetrics CPU metrics with customizable thresholds. 
+ triggers: + # This workflow can be triggered manually from the UI + - type: manual + steps: + # Query VictoriaMetrics for CPU metrics + - name: victoriametrics-step + provider: + # Use the VictoriaMetrics provider configuration + config: "{{ providers.vm }}" + type: victoriametrics + with: + # Query that returns the sum of CPU usage for each job + # Example response: + # [ + # {'metric': {'job': 'victoriametrics'}, 'value': [1737808021, '0.022633333333333307']}, + # {'metric': {'job': 'vmagent'}, 'value': [1737808021, '0.009299999999999998']} + # ] + query: sum(rate(process_cpu_seconds_total)) by (job) + queryType: query + + actions: + # Create an alert in Keep based on the query results + - name: create-alert + provider: + type: keep + with: + # Only create alert if CPU usage is above threshold + if: "{{ value.1 }} > 0.01 " + # Alert must persist for 1 minute + for: 1m + # Use job label to create unique fingerprint for each alert + fingerprint_fields: + - labels.job + alert: + # Alert name includes the specific job + name: "High CPU Usage on {{ metric.job }}" + description: "CPU usage is high on the VM (created from VM metric)" + # Set severity based on CPU usage thresholds: + # > 0.9 = critical + # > 0.7 = warning + # else = info + severity: '{{ value.1 }} > 0.9 ? "critical" : {{ value.1 }} > 0.7 ? 
"warning" : "info"' + labels: + # Job label is required for alert fingerprinting + job: "{{ metric.job }}" + # Additional context labels + environment: production + app: myapp + service: api + team: devops + owner: alice diff --git a/examples/workflows/create_service_now_ticket_upon_alerts.yml b/examples/workflows/create_service_now_ticket_upon_alerts.yml index c075ac2057..f6b99fb6b7 100644 --- a/examples/workflows/create_service_now_ticket_upon_alerts.yml +++ b/examples/workflows/create_service_now_ticket_upon_alerts.yml @@ -1,6 +1,7 @@ workflow: - id: servicenow - description: create a ticket in servicenow when an alert is triggered + id: prometheus-grafana-servicenow-integration + name: Prometheus/Grafana ServiceNow Integration + description: Creates ServiceNow tickets for Prometheus and Grafana alerts with rich context and alert enrichment. triggers: - type: alert # create ticket for grafana/prometheus alerts diff --git a/examples/workflows/cron-digest-alerts.yml b/examples/workflows/cron-digest-alerts.yml deleted file mode 100644 index 10baa9ecc7..0000000000 --- a/examples/workflows/cron-digest-alerts.yml +++ /dev/null @@ -1,27 +0,0 @@ -workflow: - id: alerts-daily-digest - description: run alerts digest twice a day (on 11:00 and 14:00) - triggers: - - type: interval - cron: 0 11,14 * * * - steps: - # get the alerts from keep - - name: get-alerts - provider: - type: keep - with: - filters: - # filter out alerts that are closed - - key: status - value: open - timerange: - from: "{{ state.workflows.alerts-daily-digest.last_run_time }}" - to: now - actions: - - name: send-digest - foreach: "{{ steps.get-alerts.results }}" - provider: - type: slack - config: "{{ providers.slack }}" - with: - message: "Open alert: {{ foreach.value.name }}" diff --git a/examples/workflows/datadog-log-monitor.yml b/examples/workflows/datadog-log-monitor.yml new file mode 100644 index 0000000000..3cbcd7866d --- /dev/null +++ b/examples/workflows/datadog-log-monitor.yml @@ -0,0 +1,49 
@@ +workflow: + id: datadog-log-monitor + name: Datadog Log Monitor + description: Monitors Datadog logs for specific services and sends Slack notifications when error conditions are detected. + triggers: + - type: manual + steps: + - name: check-error-rate + provider: + type: datadog + config: "{{ providers.datadog }}" + with: + query: "service:keep-github-app" + timeframe: "3d" + query_type: "logs" + actions: + - name: trigger-slack + condition: + - name: threshold-condition + type: threshold + value: "keep.len({{ steps.check-error-rate.results.logs }})" + compare_to: 0 + compare_type: gt + provider: + type: slack + config: "{{ providers.slack-demo }}" + with: + channel: db-is-down + # Message is always mandatory + message: > + The db is down. Please investigate. + blocks: + - type: section + text: + type: plain_text + text: | + Query: {{ steps.check-error-rate.provider_parameters.query }} + Timeframe: {{ steps.check-error-rate.provider_parameters.timeframe }} + Number of logs: keep.len({{ steps.check-error-rate.results.logs }}) + From: {{ steps.check-error-rate.provider_parameters.from }} + To: {{ steps.check-error-rate.provider_parameters.to }} + providers: + db-server-mock: + description: Paper DB Server + authentication: + datadog: + authentication: + api_key: "{{ env.DATADOG_API_KEY }}" + app_key: "{{ env.DATADOG_APP_KEY }}" diff --git a/examples/workflows/db_disk_space.yml b/examples/workflows/db_disk_space.yml deleted file mode 100644 index cde82c520e..0000000000 --- a/examples/workflows/db_disk_space.yml +++ /dev/null @@ -1,131 +0,0 @@ -# Database disk space is low (<10%) -alert: - id: db-disk-space - description: Check that the DB has enough disk space - owners: - - github-shahargl - - slack-talboren - services: - - db - - api - # Run every 60 seconds - #interval: 60 - steps: - - name: db-no-space - provider: - type: mock - config: "{{ providers.db-server-mock }}" - with: - command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space - 
command_output: 91% # Mock - actions: - - name: trigger-slack - condition: - - name: threshold-condition - type: threshold - value: "{{ steps.db-no-space.results }}" - compare_to: 90% # Trigger if more than 90% full - provider: - type: slack - config: " {{ providers.slack-demo }} " - with: - # Message is always mandatory - message: > - The disk space of {{ providers.db-server-mock.description }} is about to finish - Disk space left: {{ steps.db-no-space.results }} - blocks: - - type: header - text: - type: plain_text - text: 'Alert! :alarm_clock:' - emoji: true - - type: section - text: - type: mrkdwn - text: |- - Hello, SRE and Assistant to the Regional Manager Dwight! *Michael Scott* wants to know what's going on with the servers in the paper warehouse, there is a critical issue on-going and paper *must be delivered on time*. - *This is the alert context:* - - type: divider - - type: section - text: - type: mrkdwn - text: |- - Server *{{ providers.db-server-mock.description }}* - :floppy_disk: disk space is at {{ steps.db-no-space.results }} capacity - Seems like it prevents further inserts in to the database with some weird exception: 'This is a prank by Jim Halpert' - This means that paper production is currently on hold, Dunder Mifflin Paper Company *may lose revenue due to that*. 
- accessory: - type: image - image_url: https://media.licdn.com/dms/image/C4E03AQGtRDDj3GI4Ig/profile-displayphoto-shrink_800_800/0/1550248958619?e=2147483647&v=beta&t=-AYVwN44CsHUdIcd-7iOHQVVjfhEC0DZydhlmvNvTKo - alt_text: jim does dwight - - type: divider - - type: input - element: - type: multi_users_select - placeholder: - type: plain_text - text: Select users - emoji: true - action_id: multi_users_select-action - label: - type: plain_text - text: Select the people for the mission - emoji: true - - type: divider - - type: section - text: - type: plain_text - text: 'Some context that can help you:' - emoji: true - - type: context - elements: - - type: plain_text - text: 'DB System Info: Some important context fetched from the DB' - emoji: true - - type: context - elements: - - type: image - image_url: https://pbs.twimg.com/profile_images/625633822235693056/lNGUneLX_400x400.jpg - alt_text: cute cat - - type: mrkdwn - text: "*Cat* is currently on site, ready to follow your instructions." 
- - type: divider - - dispatch_action: true - type: input - element: - type: plain_text_input - action_id: plain_text_input-action - label: - type: plain_text - text: Please Acknowledge - emoji: true - - type: actions - elements: - - type: button - style: primary - text: - type: plain_text - text: ":dog: Datadog" - emoji: true - value: click_me_123 - - type: button - style: danger - text: - type: plain_text - text: ":sos: Database" - emoji: true - value: click_me_123 - url: https://google.com - - type: button - text: - type: plain_text - text: ":book: Playbook" - emoji: true - value: click_me_123 - url: https://google.com - - -providers: - db-server-mock: - description: Paper DB Server - authentication: diff --git a/examples/workflows/db_disk_space_monitor.yml b/examples/workflows/db_disk_space_monitor.yml new file mode 100644 index 0000000000..5fe66a8513 --- /dev/null +++ b/examples/workflows/db_disk_space_monitor.yml @@ -0,0 +1,132 @@ +# Database disk space is low (<10%) +workflow: + id: database-disk-space-monitor + name: Database Disk Space Monitor + description: Monitors database disk space usage and sends detailed Slack notifications with interactive components when space is low. 
+ owners: + - github-shahargl + - slack-talboren + services: + - db + - api + # Run every 60 seconds + triggers: + - type: interval + value: 60 + steps: + - name: db-no-space + provider: + type: mock + config: "{{ providers.db-server-mock }}" + with: + command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space + command_output: 91% # Mock + actions: + - name: trigger-slack + condition: + - name: threshold-condition + type: threshold + value: "{{ steps.db-no-space.results }}" + compare_to: 90% # Trigger if more than 90% full + provider: + type: slack + config: " {{ providers.slack-demo }} " + with: + # Message is always mandatory + message: > + The disk space of {{ providers.db-server-mock.description }} is about to finish + Disk space left: {{ steps.db-no-space.results }} + blocks: + - type: header + text: + type: plain_text + text: "Alert! :alarm_clock:" + emoji: true + - type: section + text: + type: mrkdwn + text: |- + Hello, SRE and Assistant to the Regional Manager Dwight! *Michael Scott* wants to know what's going on with the servers in the paper warehouse, there is a critical issue on-going and paper *must be delivered on time*. + *This is the alert context:* + - type: divider + - type: section + text: + type: mrkdwn + text: |- + Server *{{ providers.db-server-mock.description }}* + :floppy_disk: disk space is at {{ steps.db-no-space.results }} capacity + Seems like it prevents further inserts in to the database with some weird exception: 'This is a prank by Jim Halpert' + This means that paper production is currently on hold, Dunder Mifflin Paper Company *may lose revenue due to that*. 
+ accessory: + type: image + image_url: https://media.licdn.com/dms/image/C4E03AQGtRDDj3GI4Ig/profile-displayphoto-shrink_800_800/0/1550248958619?e=2147483647&v=beta&t=-AYVwN44CsHUdIcd-7iOHQVVjfhEC0DZydhlmvNvTKo + alt_text: jim does dwight + - type: divider + - type: input + element: + type: multi_users_select + placeholder: + type: plain_text + text: Select users + emoji: true + action_id: multi_users_select-action + label: + type: plain_text + text: Select the people for the mission + emoji: true + - type: divider + - type: section + text: + type: plain_text + text: "Some context that can help you:" + emoji: true + - type: context + elements: + - type: plain_text + text: "DB System Info: Some important context fetched from the DB" + emoji: true + - type: context + elements: + - type: image + image_url: https://pbs.twimg.com/profile_images/625633822235693056/lNGUneLX_400x400.jpg + alt_text: cute cat + - type: mrkdwn + text: "*Cat* is currently on site, ready to follow your instructions." 
+ - type: divider + - dispatch_action: true + type: input + element: + type: plain_text_input + action_id: plain_text_input-action + label: + type: plain_text + text: Please Acknowledge + emoji: true + - type: actions + elements: + - type: button + style: primary + text: + type: plain_text + text: ":dog: Datadog" + emoji: true + value: click_me_123 + - type: button + style: danger + text: + type: plain_text + text: ":sos: Database" + emoji: true + value: click_me_123 + url: https://google.com + - type: button + text: + type: plain_text + text: ":book: Playbook" + emoji: true + value: click_me_123 + url: https://google.com + providers: + db-server-mock: + description: Paper DB Server + authentication: diff --git a/examples/workflows/dd.yml b/examples/workflows/dd.yml deleted file mode 100644 index e6839c138e..0000000000 --- a/examples/workflows/dd.yml +++ /dev/null @@ -1,48 +0,0 @@ -alert: - id: db-disk-space - triggers: - - type: manual - steps: - - name: check-error-rate - provider: - type: datadog - config: "{{ providers.datadog }}" - with: - query: "service:keep-github-app" - timeframe: "3d" - query_type: "logs" - actions: - - name: trigger-slack - condition: - - name: threshold-condition - type: threshold - value: "keep.len({{ steps.check-error-rate.results.logs }})" - compare_to: 0 - operator: ">" - provider: - type: slack - config: " {{ providers.slack-demo }} " - with: - channel: db-is-down - # Message is always mandatory - message: > - The db is down. Please investigate. 
- blocks: - - type: section - text: - type: plain_text - text: | - Query: {{ steps.check-error-rate.provider_parameters.query }} - Timeframe: {{ steps.check-error-rate.provider_parameters.timeframe }} - Number of logs: keep.len({{ steps.check-error-rate.results.logs }}) - From: {{ steps.check-error-rate.provider_parameters.from }} - To: {{ steps.check-error-rate.provider_parameters.to }} - -providers: - db-server-mock: - description: Paper DB Server - authentication: - datadog: - authentication: - api_key: "{{ env.DATADOG_API_KEY }}" - app_key: "{{ env.DATADOG_APP_KEY }}" diff --git a/examples/workflows/discord_basic.yml b/examples/workflows/discord_basic.yml index 60ffb2db3b..666d7022a3 100644 --- a/examples/workflows/discord_basic.yml +++ b/examples/workflows/discord_basic.yml @@ -1,6 +1,7 @@ workflow: - id: discord-example - description: Discord example + id: discord-notification-demo + name: Discord Notification Demo + description: Demonstrates Discord integration with interactive button components for alert notifications. triggers: - type: manual actions: @@ -11,9 +12,9 @@ workflow: with: content: Alerta! components: - - type: 1 # Action row - components: - - type: 2 # Button - style: 1 # Primary style - label: "Click Me!" - custom_id: "button_click" + - type: 1 # Action row + components: + - type: 2 # Button + style: 1 # Primary style + label: "Click Me!" + custom_id: "button_click" diff --git a/examples/workflows/disk_grown_defects_rule.yml b/examples/workflows/disk_grown_defects_rule.yml index e139f82d7a..0ac050e41d 100644 --- a/examples/workflows/disk_grown_defects_rule.yml +++ b/examples/workflows/disk_grown_defects_rule.yml @@ -1,11 +1,15 @@ - # Alert description: this alert will trigger if the disk defects is over 50%, 40% or 30%. # Alert breakdown: # 1. Read the disk status from postgres (select * from disk) # 2. For each disk, check if the disk defects is over 50% (major), 40% (medium) or 30% (minor). # 3. 
If the disk defects is over the threshold, insert a new row to the alert table with the disk name and the disk defects. -alert: - id: DiskGrownDefectsRule +workflow: + id: disk-defect-tracker + name: Disk Defect Tracker + description: Monitors disk defects and creates tiered alerts in PostgreSQL based on defect percentage thresholds. + triggers: + - type: interval + value: 60 steps: - name: check-disk-defects provider: @@ -19,7 +23,7 @@ alert: condition: - name: threshold-condition type: threshold - value: " {{ foreach.value[13] }} " # disk defect is the 13th column + value: " {{ foreach.value[13] }} " # disk defect is the 13th column compare_to: 50, 40, 30 level: major, medium, minor provider: @@ -29,11 +33,11 @@ alert: query: >- INSERT INTO alert (alert_level, alert_message) VALUES ('{{ foreach.level }}', 'Disk defects: {{ foreach.value[13] }} | Disk name: {{ foreach.value[1] }}') -providers: - postgres-server: - description: The postgres server (sql) - authentication: - username: "{{ env.POSTGRES_USER }}" - password: "{{ env.POSTGRES_PASSWORD }}" - database: "{{ env.POSTGRES_DATABASE }}" - host: "{{ env.POSTGRES_HOST }}" + providers: + postgres-server: + description: The postgres server (sql) + authentication: + username: "{{ env.POSTGRES_USER }}" + password: "{{ env.POSTGRES_PASSWORD }}" + database: "{{ env.POSTGRES_DATABASE }}" + host: "{{ env.POSTGRES_HOST }}" diff --git a/examples/workflows/eks_advanced.yml b/examples/workflows/eks_advanced.yml new file mode 100644 index 0000000000..d40bb0ea3b --- /dev/null +++ b/examples/workflows/eks_advanced.yml @@ -0,0 +1,65 @@ +workflow: + id: eks-deployment-scaling-manager + name: EKS Deployment Scaling Manager + description: Manages EKS cluster operations including pod monitoring and deployment scaling. Retrieves pod status, scales nginx deployment, and provides detailed status reporting. 
+ triggers: + - type: manual + steps: + # get all pods + - name: get-pods + provider: + type: eks + config: "{{ providers.eks }}" + with: + command_type: get_pods + + # get specific deployment info + - name: get-deployment-info + provider: + type: eks + config: "{{ providers.eks }}" + with: + command_type: get_deployment + namespace: default + deployment_name: nginx-test + + # scale up deployment + - name: scale-up + provider: + type: eks + config: "{{ providers.eks }}" + with: + command_type: scale_deployment + namespace: default + deployment_name: nginx-test + replicas: 4 + + # get pods after scaling + - name: get-pods-after-scale + provider: + type: eks + config: "{{ providers.eks }}" + with: + command_type: get_pods + namespace: default + + actions: + - name: echo-all-pods + foreach: "{{ steps.get-pods.results }}" + provider: + type: console + with: + message: "Pod name: {{ foreach.value.metadata.name }} || Namespace: {{ foreach.value.metadata.namespace }} || Status: {{ foreach.value.status.phase }}" + + - name: echo-deployment-info + provider: + type: console + with: + message: "Deployment {{ steps.get-deployment-info.results.metadata.name }} has {{ steps.get-deployment-info.results.status.replicas }} replicas" + + - name: echo-scaled-pods + foreach: "{{ steps.get-pods-after-scale.results }}" + provider: + type: console + with: + message: "After scaling - Pod name: {{ foreach.value.metadata.name }} || Status: {{ foreach.value.status.phase }}" diff --git a/examples/workflows/eks_basic.yml b/examples/workflows/eks_basic.yml new file mode 100644 index 0000000000..1d00455b45 --- /dev/null +++ b/examples/workflows/eks_basic.yml @@ -0,0 +1,21 @@ +workflow: + id: eks-pod-status-monitor + name: EKS Pod Status Monitor + description: Monitors and reports the status of all pods in an EKS cluster, including their names, namespaces, and current phases. 
+ triggers: + - type: manual + steps: + # get all pods + - name: get-pods + provider: + type: eks + config: "{{ providers.eks }}" + with: + command_type: get_pods + actions: + - name: echo-pod-status + foreach: "{{ steps.get-pods.results }}" + provider: + type: console + with: + message: "Pod name: {{ foreach.value.metadata.name }} || Namespace: {{ foreach.value.metadata.namespace }} || Status: {{ foreach.value.status.phase }}" diff --git a/examples/workflows/elastic_basic.yml b/examples/workflows/elastic_basic.yml new file mode 100644 index 0000000000..20e0ea888d --- /dev/null +++ b/examples/workflows/elastic_basic.yml @@ -0,0 +1,19 @@ +workflow: + id: elastic-basic + name: Simple query from Elasticsearch + description: Querying alerts from Keep's elastic index (e.g. info alerts) + triggers: + - type: manual + steps: + - name: query-ack-index + provider: + type: elastic + config: " {{ providers.elastic }} " + with: + index: keep-alerts-keep + query: | + { + "query_string": { + "query": "info" + } + } diff --git a/examples/workflows/elastic_enrich_example.yml b/examples/workflows/elastic_enrich_example.yml index e70be706d4..1f385b862a 100644 --- a/examples/workflows/elastic_enrich_example.yml +++ b/examples/workflows/elastic_enrich_example.yml @@ -1,7 +1,8 @@ # if no acknowledgement has been recieved (updated in index) for x (from config index) time, i want to escalate it to next level of people workflow: - id: elastic-enrich - description: escalate-if-needed + id: alert-acknowledgment-escalator + name: Alert Acknowledgment Escalator + description: Monitors unacknowledged alerts in Elasticsearch and automatically escalates them based on configured thresholds. Integrates with people and configuration indices for smart escalation routing. 
triggers: # run every minute - type: interval @@ -9,62 +10,65 @@ workflow: steps: # first, query the ack index to check if there are any alerts that have not been acknowledged - name: query-ack-index - type: elastic - config: " {{ providers.elastic }} " - with: - index: your_ack_index - query: | - { - "query": { - "bool": { - "must": [ - { - "match": { - "acknowledged": false + provider: + type: elastic + config: " {{ providers.elastic }} " + with: + index: your_ack_index + query: | + { + "query": { + "bool": { + "must": [ + { + "match": { + "acknowledged": false + } } - } - ] + ] + } } } - } - name: query-config-index - type: elastic - config: " {{ providers.elastic }} " - with: - index: your_config_index - query: | - { - "query": { - "bool": { - "must": [ - { - "match": { - "config": true + provider: + type: elastic + config: " {{ providers.elastic }} " + with: + index: your_config_index + query: | + { + "query": { + "bool": { + "must": [ + { + "match": { + "config": true + } } - } - ] + ] + } } } - } - name: query-people-index - type: elastic - config: " {{ providers.elastic }} " - with: - index: your_people_index - query: | - { - "query": { - "bool": { - "must": [ - { - "match": { - "people": true + provider: + type: elastic + config: " {{ providers.elastic }} " + with: + index: your_people_index + query: | + { + "query": { + "bool": { + "must": [ + { + "match": { + "people": true + } } - } - ] + ] + } } } - } # now, we have the results from the ack index, config index, and people index actions: - name: escalate-if-needed @@ -75,4 +79,4 @@ workflow: config: " {{ providers.slack }} " with: message: | - "A unacknowledged alert has been found: {{ query-ack-index.hits.hits }} {{ query-config-index.hits.hits }} {{ query-people-index.hits.hits }}" + "A unacknowledged alert has been found: {{ query-ack-index.hits.hits }} {{ query-config-index.hits.hits }} {{ query-people-index.hits.hits }}" diff --git 
a/examples/workflows/enrich_using_structured_output_from_deepseek.yaml b/examples/workflows/enrich_using_structured_output_from_deepseek.yaml new file mode 100644 index 0000000000..2e78672f45 --- /dev/null +++ b/examples/workflows/enrich_using_structured_output_from_deepseek.yaml @@ -0,0 +1,41 @@ +workflow: + id: deepseek-alert-enrichment + name: DeepSeek Alert Enrichment + description: Enriches Prometheus alerts using DeepSeek Coder to determine environment and customer impact information through structured JSON output. + triggers: + - type: alert + filters: + - key: source + value: prometheus + + steps: + - name: get-enrichments + provider: + config: "{{ providers.my_deepseek }}" + type: deepseek + with: + prompt: | + You received such an alert {{alert}}, generate missing fields. + + Environment could be \"production\", \"staging\", \"development\". + + EXAMPLE JSON OUTPUT: + { + \"environment\": \"production\", + \"impacted_customer_name\": \"Acme Corporation\" + } + + model: "deepseek-coder-33b-instruct" + structured_output_format: # We limit what model could return + type: json_object + + actions: + - name: enrich-alert + provider: + type: mock + with: + enrich_alert: + - key: environment + value: "{{ steps.get-enrichments.results.response.environment }}" + - key: impacted_customer_name + value: "{{ steps.get-enrichments.results.response.impacted_customer_name }}" diff --git a/examples/workflows/enrich_using_structured_output_from_openai.yaml b/examples/workflows/enrich_using_structured_output_from_openai.yaml new file mode 100644 index 0000000000..402d6e627f --- /dev/null +++ b/examples/workflows/enrich_using_structured_output_from_openai.yaml @@ -0,0 +1,50 @@ +workflow: + id: openai-alert-enrichment + name: OpenAI Alert Enrichment + description: Enriches Prometheus alerts using GPT-4 structured output to determine environment and impacted customer information with strict schema validation. 
+ + triggers: + - type: alert + filters: + - key: source + value: prometheus + + steps: + - name: get-enrichments + provider: + config: "{{ providers.my_openai }}" + type: openai # Could be also LiteLLM + with: + prompt: "You received such an alert {{alert}}, generate missing fields." + model: "gpt-4o-mini" # This model supports structured output + structured_output_format: # We limit what model could return + type: json_schema + json_schema: + name: missing_fields + schema: + type: object + properties: + environment: + type: string + enum: + - "production" + - "pre-prod" + - "debug" + description: "Be pessimistic, return pre-prod or production only if you see evidence in the alert body." + impacted_customer_name: + type: string + description: "Return undefined if you are not sure about the customer." + required: ["environment", "impacted_customer_name"] + additionalProperties: false + strict: true + + actions: + - name: enrich-alert + provider: + type: mock + with: + enrich_alert: + - key: environment + value: "{{ steps.get-enrichments.results.response.environment }}" + - key: impacted_customer_name + value: "{{ steps.get-enrichments.results.response.impacted_customer_name }}" diff --git a/examples/workflows/enrich_using_structured_output_from_vllm_qwen.yaml b/examples/workflows/enrich_using_structured_output_from_vllm_qwen.yaml new file mode 100644 index 0000000000..07abec499a --- /dev/null +++ b/examples/workflows/enrich_using_structured_output_from_vllm_qwen.yaml @@ -0,0 +1,44 @@ +workflow: + id: vllm-qwen-alert-enrichment + name: vLLM Qwen Alert Enrichment + description: Enriches Prometheus alerts using vLLM-hosted Qwen model to automatically determine environment type and impacted customer details. + + triggers: + - type: alert + filters: + - key: source + value: prometheus + + steps: + - name: get-enrichments + provider: + config: "{{ providers.my_vllm }}" + type: vllm + with: + prompt: "You received such an alert {{alert}}, generate missing fields." 
+ model: "Qwen/Qwen1.5-1.8B-Chat" # This model supports structured output + structured_output_format: # We limit what model could return + type: object + properties: + environment: + type: string + enum: + - production + - debug + - pre-prod + impacted_customer_name: + type: string + required: + - environment + - impacted_customer_name + + actions: + - name: enrich-alert + provider: + type: mock + with: + enrich_alert: + - key: environment + value: "{{ steps.get-enrichments.results.response.environment }}" + - key: impacted_customer_name + value: "{{ steps.get-enrichments.results.response.impacted_customer_name }}" diff --git a/examples/workflows/failed-to-login-workflow.yml b/examples/workflows/failed-to-login-workflow.yml index caa8181fb0..0e13f816ac 100644 --- a/examples/workflows/failed-to-login-workflow.yml +++ b/examples/workflows/failed-to-login-workflow.yml @@ -1,6 +1,7 @@ workflow: - id: query-bigquery-when-alert-triggers-by-cloudwatch - description: Decide how to alert based on customer tier and enrich context + id: tiered-login-failure-response + name: Tiered Login Failure Response + description: Handles user login failures by querying customer tier from BigQuery and routes notifications to appropriate channels - OpsGenie for enterprise customers and Slack for all tiers. triggers: - type: alert filters: diff --git a/examples/workflows/flashduty_example.yml b/examples/workflows/flashduty_example.yml new file mode 100644 index 0000000000..dcb977472a --- /dev/null +++ b/examples/workflows/flashduty_example.yml @@ -0,0 +1,28 @@ +workflow: + id: flashduty-incident-notifier + name: FlashDuty Incident Notifier + description: Manages incident notifications in FlashDuty with customizable event statuses, labels, and environment tracking. 
+ disabled: false + triggers: + - type: incident + events: + - created + - updated + - deleted + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: flashduty-action + provider: + type: flashduty + config: "{{ providers.default-flashduty }}" + with: + title: test title + description: test description + event_status: Info + alert_key: 611eed6614ec + labels: + service: flashduty + environment: dev diff --git a/examples/workflows/fluxcd_example.yml b/examples/workflows/fluxcd_example.yml new file mode 100644 index 0000000000..ef1b7d8e51 --- /dev/null +++ b/examples/workflows/fluxcd_example.yml @@ -0,0 +1,56 @@ +workflow: + id: fluxcd-example + name: "FluxCD Resource Monitor" + description: "Example workflow that retrieves Flux CD resources and creates alerts for failed deployments" + triggers: + - type: interval + value: 1800 # 30 minutes in seconds + steps: + - name: get-fluxcd-resources + provider: + type: fluxcd + config: "{{ providers.fluxcd }}" + with: + kubeconfig: "{{ env.KUBECONFIG }}" + namespace: "flux-system" + vars: + fluxcd_resources: "{{ steps.get-fluxcd-resources.results }}" + + - name: check-for-failed-deployments + provider: + type: console + with: + message: | + Found {{ vars.fluxcd_resources.kustomizations | length }} Kustomizations and {{ vars.fluxcd_resources.helm_releases | length }} HelmReleases + + - name: create-alerts-for-failed-kustomizations + foreach: "{{ vars.fluxcd_resources.kustomizations }}" + if: "{{ item.status.conditions[0].status == 'False' }}" + provider: + type: keep + with: + alert_name: "FluxCD Kustomization {{ item.metadata.name }} failed" + alert_description: "Kustomization {{ item.metadata.name }} in namespace {{ item.metadata.namespace }} failed with message: {{ item.status.conditions[0].message }}" + alert_severity: "critical" + alert_fingerprint: "fluxcd-kustomization-{{ item.metadata.name }}-{{ item.metadata.namespace }}" + alert_source: "fluxcd" + alert_labels: + namespace: "{{ 
item.metadata.namespace }}" + name: "{{ item.metadata.name }}" + type: "kustomization" + + - name: create-alerts-for-failed-helmreleases + foreach: "{{ vars.fluxcd_resources.helm_releases }}" + if: "{{ item.status.conditions[0].status == 'False' }}" + provider: + type: keep + with: + alert_name: "FluxCD HelmRelease {{ item.metadata.name }} failed" + alert_description: "HelmRelease {{ item.metadata.name }} in namespace {{ item.metadata.namespace }} failed with message: {{ item.status.conditions[0].message }}" + alert_severity: "critical" + alert_fingerprint: "fluxcd-helmrelease-{{ item.metadata.name }}-{{ item.metadata.namespace }}" + alert_source: "fluxcd" + alert_labels: + namespace: "{{ item.metadata.namespace }}" + name: "{{ item.metadata.name }}" + type: "helmrelease" diff --git a/examples/workflows/gcp_logging_open_ai.yaml b/examples/workflows/gcp_logging_open_ai.yaml new file mode 100644 index 0000000000..8c20b8bbd0 --- /dev/null +++ b/examples/workflows/gcp_logging_open_ai.yaml @@ -0,0 +1,42 @@ +workflow: + id: gcp-log-analysis-ai + name: GCP Log Analysis with AI + description: Analyzes Cloud Run errors using OpenAI to provide root cause analysis from GCP logs, including confidence scoring and relevant log entries. + disabled: false + triggers: + - type: manual + - filters: + - key: source + value: gcpmonitoring + type: alert + consts: {} + owners: [] + services: [] + steps: + - name: gcpmonitoring-step + provider: + config: "{{ providers.gcp }}" + type: gcpmonitoring + with: + as_json: false + filter: resource.type = "cloud_run_revision" {{alert.traceId}} + page_size: 1000 + raw: false + timedelta_in_days: 1 + - name: openai-step + provider: + config: "{{ providers.openai }}" + type: openai + with: + prompt: | + You are a very talented engineer that receives context from GCP logs + about an endpoint that returned 500 status code and reports back the root + cause analysis. 
Here is the context: keep.json_dumps({{steps.gcpmonitoring-step.results}}) (it is a JSON list of log entries from GCP Logging). + In your answer, also provide the log entry that made you conclude the root cause and specify what your certainty level is that it is the root cause. (between 1-10, where 1 is low and 10 is high) + actions: + - name: slack-action + provider: + config: "{{ providers.slack }}" + type: slack + with: + message: "{{steps.openai-step.results}}" diff --git a/examples/workflows/gke.yml b/examples/workflows/gke.yml index 78629bca93..3ed5b91a58 100644 --- a/examples/workflows/gke.yml +++ b/examples/workflows/gke.yml @@ -1,6 +1,7 @@ -alert: - id: gke-example - description: gke-example +workflow: + id: gke-pod-status-monitor + name: GKE Pod Status Monitor + description: Monitors and displays status information for all pods in a Google Kubernetes Engine cluster, including pod names, namespaces, and phases. triggers: - type: manual steps: @@ -17,4 +18,4 @@ alert: provider: type: console with: - alert_message: "Pod name: {{ foreach.value.metadata.name }} || Namespace: {{ foreach.value.metadata.namespace }} || Status: {{ foreach.value.status.phase }}" + message: "Pod name: {{ foreach.value.metadata.name }} || Namespace: {{ foreach.value.metadata.namespace }} || Status: {{ foreach.value.status.phase }}" diff --git a/examples/workflows/http_enrich.yml b/examples/workflows/http_enrich.yml new file mode 100644 index 0000000000..4fc7c2bb96 --- /dev/null +++ b/examples/workflows/http_enrich.yml @@ -0,0 +1,25 @@ +workflow: + id: http_enrich + name: Enrich alert with HTTP + description: Enrich alert with HTTP Action, using a public free API + disabled: false + triggers: + - type: alert + filters: + - key: source + value: prometheus + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: http-action + provider: + type: http + config: "{{ providers.default-http }}" + with: + url: https://api.restful-api.dev/objects/7 + method: GET + enrich_alert: 
+ - key: computerName + value: results.body.name diff --git a/examples/workflows/ifelse.yml b/examples/workflows/ifelse.yml new file mode 100644 index 0000000000..acff568283 --- /dev/null +++ b/examples/workflows/ifelse.yml @@ -0,0 +1,56 @@ +workflow: + id: alert-routing-policy + name: Alert Routing Policy Manager + description: Routes alerts to appropriate channels based on multiple criteria including business hours, team ownership, environment, and monitor type with conditional flow control. + triggers: + - type: alert + actions: + - name: business-hours-check + if: "keep.is_business_hours(timezone='America/New_York')" + # stop the workflow if it's business hours + continue: false + provider: + type: console + with: + message: "Alert during business hours, exiting" + + - name: infra-prod-slack + if: "'{{ alert.team }}' == 'infra' and '{{ alert.env }}' == 'prod'" + provider: + type: slack + config: "{{ providers.slack-prod }}" + with: + channel: prod-infra-alerts + message: | + "Infrastructure Production Alert + Team: {{ alert.team }} + Environment: {{ alert.env }} + Description: {{ alert.description }}" + + - name: http-api-errors-slack + if: "'{{ alert.monitor_name }}' == 'Http API Errors'" + provider: + type: slack + config: "{{ providers.slack-prod }}" + with: + channel: backend-team-alerts + message: | + "HTTP API Error Alert + Monitor: {{ alert.monitor_name }} + Description: {{ alert.description }}" + # exit after sending http api error alert + continue: false + + - name: backend-staging-pagerduty + if: "'{{ alert.team }}'== 'backend' and '{{ alert.env }}' == 'staging'" + provider: + type: console + with: + severity: low + message: | + "Backend Staging Alert + Team: {{ alert.team }} + Environment: {{ alert.env }} + Description: {{ alert.description }}" + # Exit after sending staging alert + continue: false diff --git a/examples/workflows/ilert-incident-upon-alert.yaml b/examples/workflows/ilert-incident-upon-alert.yaml index 09ba1dea0c..735c16d71e 100644 --- 
a/examples/workflows/ilert-incident-upon-alert.yaml +++ b/examples/workflows/ilert-incident-upon-alert.yaml @@ -1,23 +1,25 @@ -id: aad72d69-92b9-4e21-8f67-97d2a69bf8ac -description: Create ILert incident upon Keep Alert -triggers: -- filters: - - key: source - value: keep - type: alert -owners: [] -services: [] -steps: [] -actions: -- name: ilert-action - provider: - config: '{{ providers.ilert-default }}' - type: ilert - with: - affectedServices: - - impact: OPERATIONAL - service: - id: 339743 - message: A mock incident created with Keep! - status: INVESTIGATING - summary: Keep Incident {{ alert.name }} +workflow: + id: ilert-incident-creator + name: iLert Incident Creator + description: Creates structured incidents in iLert from Keep alerts, including service impact assessment and investigation status tracking. + triggers: + - filters: + - key: source + value: keep + type: alert + owners: [] + services: [] + steps: [] + actions: + - name: ilert-action + provider: + config: "{{ providers.ilert-default }}" + type: ilert + with: + affectedServices: + - impact: OPERATIONAL + service: + id: 339743 + message: A mock incident created with Keep! + status: INVESTIGATING + summary: Keep Incident {{ alert.name }} diff --git a/examples/workflows/incident-enrich.yaml b/examples/workflows/incident-enrich.yaml new file mode 100644 index 0000000000..f41755c088 --- /dev/null +++ b/examples/workflows/incident-enrich.yaml @@ -0,0 +1,30 @@ +workflow: + id: incident-metadata-enricher + name: Incident Metadata Enricher + description: Enriches incidents with additional metadata including environment, incident IDs, URLs, and provider information while logging incident details. 
+ disabled: false + triggers: + - type: manual + - events: + - created + - updated + type: incident + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: console-log + provider: + type: console + with: + message: "Incident name: {{ incident.user_generated_name }} | severity: {{ incident.severity }}" + enrich_incident: + - key: environment + value: "prod-de-prod" + - key: incident_id + value: "1234567890" + - key: incident_url + value: "https://keephq.dev/incident/1234567890" + - key: incident_provider + value: "jira" diff --git a/examples/workflows/incident-tier-escalation.yml b/examples/workflows/incident-tier-escalation.yml new file mode 100644 index 0000000000..108d00f749 --- /dev/null +++ b/examples/workflows/incident-tier-escalation.yml @@ -0,0 +1,41 @@ +workflow: + id: incident-tier-escalation + name: Incident Tier Escalation + description: Manages incident escalation tiers based on alert conditions, automatically adjusting notification tiers and sending appropriate Slack notifications for each level. 
+ triggers: + # when an incident is created or updated with a new alert + - type: incident + events: + - created + - updated + actions: + - name: send-slack-message-tier-0 + # send tier0 if this is a new incident (no tier set) or if the incident is tier0 but the alert is alert2 + if: "{{ !incident.current_tier || incident.current_tier == 0 && alert.name == 'alert2' }}" + provider: + type: slack + config: "{{ providers.slack }}" + with: + message: | + "Incident created: {{ incident.name }} - {{ incident.description }} + Tier: 0" + Alert: {{ alert.name }} - {{ alert.description }} + Alert details: {{ alert }}" + # enrich the incident with the current tier + enrich_incident: + - key: current_tier + value: 0 + - name: send-slack-message-tier-1 + if: "{{ incident.current_tier == 0 && alert.name == 'alert1' }}" + provider: + type: slack + config: "{{ providers.slack }}" + with: + message: | + "Incident updated: {{ incident.name }} - {{ incident.description }} + Tier: 1 + Alert: {{ alert.name }} - {{ alert.description }} + Alert details: {{ alert }}" + enrich_incident: + - key: current_tier + value: 1 diff --git a/examples/workflows/incident_example.yml b/examples/workflows/incident_example.yml new file mode 100644 index 0000000000..f5a1e504ac --- /dev/null +++ b/examples/workflows/incident_example.yml @@ -0,0 +1,16 @@ +workflow: + id: incident-echo-monitor + name: Incident Echo Monitor + description: Monitors incident updates and creations, providing basic console logging for incident tracking and debugging. + triggers: + - type: incident + events: + - updated + - created + + actions: + - name: just-echo + provider: + type: console + with: + message: "Hey there! I am an incident!" 
diff --git a/examples/workflows/inputs_example.yml b/examples/workflows/inputs_example.yml new file mode 100644 index 0000000000..abd026d560 --- /dev/null +++ b/examples/workflows/inputs_example.yml @@ -0,0 +1,36 @@ +workflow: + id: input-example + name: Input Example + description: Simple workflow demonstrating input functionality with customizable messages. + triggers: + - type: manual + + inputs: + - name: message + description: The message to log to the console + type: string + default: "Hey" + - name: nodefault + description: A no default examples + type: string + - name: boolexample + description: Whether to log the message + type: boolean + default: true + - name: choiceexample + description: The choice to make + type: choice + default: "option1" + options: + - option1 + - option2 + - option3 + actions: + - name: echo + provider: + type: console + with: + message: | + "This is my input message: {{ inputs.message }} + This is my input boolean: {{ inputs.boolexample }} + This is my input choice: {{ inputs.choiceexample }}" diff --git a/examples/workflows/jira-create-ticket-on-alert.yml b/examples/workflows/jira-create-ticket-on-alert.yml new file mode 100644 index 0000000000..6e74946361 --- /dev/null +++ b/examples/workflows/jira-create-ticket-on-alert.yml @@ -0,0 +1,32 @@ +workflow: + id: jira-create-ticket-on-alert + name: Create Jira Ticket on Alert + description: Create Jira ticket when alert fires + disabled: false + triggers: + - type: alert + cel: status == "firing" + actions: + - name: jira-action + if: "not '{{ alert.ticket_id }}'" + provider: + type: jira + config: "{{ providers.JiraCloud }}" + with: + board_name: YOUR_BOARD_NAME # Change this to your board name + issue_type: Task # Or Bug, Story, etc. + summary: "{{ alert.name }} - {{ alert.description }}" + description: | + "This ticket was created automatically by Keep. 
+ + Alert Details: + {code:json} + {{ alert }} + {code}" + enrich_alert: + - key: ticket_type + value: jira + - key: ticket_id + value: results.issue.key + - key: ticket_url + value: results.ticket_url \ No newline at end of file diff --git a/examples/workflows/jira-transition-on-resolved.yml b/examples/workflows/jira-transition-on-resolved.yml new file mode 100644 index 0000000000..cf09c9c079 --- /dev/null +++ b/examples/workflows/jira-transition-on-resolved.yml @@ -0,0 +1,26 @@ +workflow: + id: jira-transition-on-resolved + name: Transition Jira Ticket to Done + description: Close Jira ticket when alert is resolved + disabled: false + triggers: + - type: alert + cel: status == "resolved" + actions: + - name: jira-action + provider: + type: jira + config: "{{ providers.JiraCloud }}" + with: + issue_id: "{{ alert.ticket_id }}" + summary: "{{ alert.name }} - {{ alert.description }} (resolved)" + description: | + "Alert has been resolved automatically by Keep. + + Resolved at: {{ alert.lastReceived }} + + Original Alert Details: + {code:json} + {{ alert }} + {code}" + transition_to: Done # Change to your workflow's status name \ No newline at end of file diff --git a/examples/workflows/jira_on_prem.yml b/examples/workflows/jira_on_prem.yml index 11e4dcc48f..a27138ebb3 100644 --- a/examples/workflows/jira_on_prem.yml +++ b/examples/workflows/jira_on_prem.yml @@ -1,24 +1,24 @@ workflow: - id: jiraonprem-example - description: test + id: jira-onprem-incident-creator + name: Jira On-Prem Incident Creator + description: Creates standardized incidents in on-premises Jira with customizable fields, labels, and priorities for SRE team tracking. 
triggers: - - type: manual - name: test + - type: manual owners: [] services: [] steps: [] actions: - - name: jiraonprem-action - provider: - config: '{{ providers.jira }}' - type: jiraonprem - with: - board_name: SA - custom_fields: '' - description: test - issue_type: Incident - labels: - - "SRE_Team" - priority: Low - project_key: SA - summary: test + - name: jiraonprem-action + provider: + config: "{{ providers.jira }}" + type: jiraonprem + with: + board_name: SA + custom_fields: "" + description: test + issue_type: Incident + labels: + - "SRE_Team" + priority: Low + project_key: SA + summary: test diff --git a/examples/workflows/keep_semantic_alert_example_datadog.yml b/examples/workflows/keep_semantic_alert_example_datadog.yml deleted file mode 100644 index 325e7bf980..0000000000 --- a/examples/workflows/keep_semantic_alert_example_datadog.yml +++ /dev/null @@ -1,34 +0,0 @@ -# AUTO GENERATED -# Alert that was created with Keep semantic layer -# Prompt: can you write an alert spec that triggers when a service has more than 0.01% error rate in datadog for more than an hour? -alert: - id: service-error-rate - description: Check if the service has more than 0.01% error rate for more than an hour - owners: - - github-johndoe - - slack-janedoe - services: - - my-service - steps: - - name: check-error-rate - provider: - type: datadog - config: "{{ providers.datadog }}" - with: - query: "sum:my_service.errors{*}.as_count() / sum:my_service.requests{*}.as_count() * 100" - timeframe: "1h" - actions: - - name: notify-slack - condition: - - name: threshold-condition - type: threshold - value: "{{ steps.check-error-rate.results }}" - compare_to: 0.01 - operator: ">" - provider: - type: slack - config: "{{ providers.slack-demo }}" - with: - channel: service-alerts - message: > - The my_service error rate is higher than 0.01% for more than an hour. Please investigate. 
diff --git a/examples/workflows/monday_create_pulse.yml b/examples/workflows/monday_create_pulse.yml new file mode 100644 index 0000000000..47db926800 --- /dev/null +++ b/examples/workflows/monday_create_pulse.yml @@ -0,0 +1,27 @@ +workflow: + id: monday-pulse-creator + name: Monday.com Pulse Creator + description: Creates new pulses (items) in Monday.com boards with customizable column values and group assignments. + triggers: + - type: manual + actions: + - name: monday + provider: + type: monday + config: "{{ providers.monday }}" + with: + # Open the board in monday.com web app. + # Hover over the board name in the side panel, click on the three dots that appear, and click on ID to copy the board ID. + board_id: 1956384489 + # Hover over the group name in the board, click on the three dots that appear, and click on Group ID to copy the group ID. + group_id: "topics" + # Item Name is the name of the pulse you want to add. + item_name: "Test" + column_values: + # Specify the column IDs and their corresponding values for the new item/pulse. + # Hover over the column name in the board, click on the three dots that appear, and click on Column ID to copy the column ID. + # The Key is the column ID and the Value is the value you want to set for the column. + - text_mkm77x3p: "helo" + # Here text_mkm77x3p is the column ID and helo is the value. + - text_1_mkm7x2ep: "10" + # Here text_1_mkm7x2ep is the column ID and 10 is the value. diff --git a/examples/workflows/multi-condition-cel.yml b/examples/workflows/multi-condition-cel.yml new file mode 100644 index 0000000000..23c636e256 --- /dev/null +++ b/examples/workflows/multi-condition-cel.yml @@ -0,0 +1,13 @@ +workflow: + id: multi-condition-monitor-cel + name: Multi-Condition Monitor (CEL) + description: Monitors alerts with multiple conditions using CEL filters. 
+ triggers: + - type: alert + cel: source.contains("prometheus") && severity == "critical" && environment == "production" + actions: + - name: notify + provider: + type: console + with: + message: "Critical production alert from Prometheus: {{ alert.name }}" diff --git a/examples/workflows/mustache-paths-example.yml b/examples/workflows/mustache-paths-example.yml new file mode 100644 index 0000000000..99ccdf57b2 --- /dev/null +++ b/examples/workflows/mustache-paths-example.yml @@ -0,0 +1,34 @@ +workflow: + id: mustache-path-extractor + name: Mustache Path Extractor + description: Demonstrates extraction of values from nested dictionaries and lists using Mustache templating with Python and console output. + disabled: false + triggers: + - type: manual + consts: {} + owners: [] + services: [] + steps: + - name: step-with-dict + provider: + config: "{{ providers.default-python }}" + type: python + with: + code: "{'hello': 'world', 'nested': {'bye': 'bye'}, 'nested_list': ['a','b','c', {'in': 'list'}]}" + - name: step-with-list + provider: + config: "{{ providers.default-python }}" + type: python + with: + code: "[{'hello': 'world', 'nested': {'bye': 'bye'}, 'nested_list': ['a','b','c', {'in': 'list'}]}]" + - name: console-step-with-dict + provider: + type: console + with: + message: "{{ steps.step-with-dict.results.hello }}" + - name: console-step-with-list + provider: + type: console + with: + message: "{{ steps.step-with-list.results.0.nested.bye }}" + actions: [] diff --git a/examples/workflows/new-auth0-users-monitor.yml b/examples/workflows/new-auth0-users-monitor.yml new file mode 100644 index 0000000000..1c7076a9cd --- /dev/null +++ b/examples/workflows/new-auth0-users-monitor.yml @@ -0,0 +1,40 @@ +# Alert when there are new Auth0 users +workflow: + id: new-auth0-users-monitor + name: New Auth0 Users Monitor + description: Tracks new Auth0 user signups and sends Slack notifications with detailed user information, maintaining state between runs. 
+ triggers: + - type: interval + value: 3600 # every hour + steps: + - name: get-auth0-users + provider: + type: auth0.logs + config: "{{ providers.auth0 }}" + with: + log_type: ss + previous_users: "{{ state.new-auth0-users.-1.alert_context.alert_steps_context.get-auth0-users.results.users }}" # state.alert-id.-1 for last run + actions: + - name: trigger-slack + condition: + - name: assert-condition + type: assert + assert: "{{ steps.get-auth0-users.results.new_users_count }} == 0" # if there are more than 0 new users, trigger the action + provider: + type: slack + config: " {{ providers.slack-demo }} " + with: + blocks: + - type: section + text: + type: plain_text + text: There are new keep.len({{ steps.get-auth0-users.results.new_users }}) users! + emoji: true + - type: section + text: + type: plain_text + text: |- + {{#steps.get-auth0-users.results.new_users}} + - {{user_name}} + {{/steps.get-auth0-users.results.new_users}} + emoji: true \ No newline at end of file diff --git a/examples/workflows/new_auth0_users.yml b/examples/workflows/new_auth0_users.yml deleted file mode 100644 index e9c0f0fe40..0000000000 --- a/examples/workflows/new_auth0_users.yml +++ /dev/null @@ -1,36 +0,0 @@ -# Alert when there are new Auth0 users -alert: - id: new-auth0-users - description: Get new users logged in to the platform - steps: - - name: get-auth0-users - provider: - type: auth0.logs - config: "{{ providers.auth0 }}" - with: - log_type: ss - previous_users: "{{ state.new-auth0-users.-1.alert_context.alert_steps_context.get-auth0-users.results.users }}" # state.alert-id.-1 for last run - actions: - - name: trigger-slack - condition: - - name: assert-condition - type: assert - assert: "{{ steps.get-auth0-users.results.new_users_count }} == 0" # if there are more than 0 new users, trigger the action - provider: - type: slack - config: " {{ providers.slack-demo }} " - with: - blocks: - - type: section - text: - type: plain_text - text: There are new keep.len({{ 
steps.get-auth0-users.results.new_users }}) users! - emoji: true - - type: section - text: - type: plain_text - text: |- - {{#steps.get-auth0-users.results.new_users}} - - {{user_name}} - {{/steps.get-auth0-users.results.new_users}} - emoji: true diff --git a/examples/workflows/new_github_stars.yml b/examples/workflows/new_github_stars.yml index 41f91e3d5b..5053ff2e26 100644 --- a/examples/workflows/new_github_stars.yml +++ b/examples/workflows/new_github_stars.yml @@ -1,42 +1,47 @@ -id: new-github-stars -description: Notify Slack about new GitHub star for keephq/keep -triggers: - - type: manual - - type: interval - value: 300 -steps: - - name: get-github-stars - provider: - config: "{{ providers.github }}" - type: github.stars - with: - previous_stars_count: - default: 0 - key: "{{ last_workflow_results.get-github-stars.0.stars }}" - repository: keephq/keep -actions: - - condition: - - assert: "{{ steps.get-github-stars.results.new_stargazers_count }} > 0" - name: assert-condition - type: assert - name: trigger-slack - provider: - config: "{{ providers.slack-demo }}" - type: slack - with: - blocks: - - text: - emoji: true - text: There are new keep.len({{ steps.get-github-stars.results.new_stargazers}}) stargazers for keephq/keep - type: plain_text - type: section - - text: - emoji: true - text: "{{#steps.get-github-stars.results.new_stargazers}} +workflow: + id: github-star-tracker + name: GitHub Star Tracker + description: Monitors new GitHub stars for the Keep repository and sends Slack notifications with stargazer details and timestamps. 
+ triggers: + - type: manual + - type: interval + value: 300 + steps: + - name: get-github-stars + provider: + config: "{{ providers.github }}" + type: github.stars + with: + previous_stars_count: + default: 0 + key: "{{ last_workflow_results.get-github-stars.0.stars }}" + last_stargazer: + default: "" + key: "{{ last_workflow_results.get-github-stars.0.last_stargazer }}" + repository: keephq/keep + actions: + - condition: + - assert: "{{ steps.get-github-stars.results.new_stargazers_count }} > 0" + name: assert-condition + type: assert + name: trigger-slack + provider: + config: "{{ providers.slack-demo }}" + type: slack + with: + blocks: + - text: + emoji: true + text: There are new keep.len({{ steps.get-github-stars.results.new_stargazers}}) stargazers for keephq/keep + type: plain_text + type: section + - text: + emoji: true + text: "{{#steps.get-github-stars.results.new_stargazers}} - - {{username}} at {{starred_at}} + - {{username}} at {{starred_at}} - {{/steps.get-github-stars.results.new_stargazers}}" - type: plain_text - type: section - channel: "C06N0KXXXX" + {{/steps.get-github-stars.results.new_stargazers}}" + type: plain_text + type: section + channel: "C06N0KXXXX" diff --git a/examples/workflows/notify-new-trello-card.yml b/examples/workflows/notify-new-trello-card.yml new file mode 100644 index 0000000000..681d4938e1 --- /dev/null +++ b/examples/workflows/notify-new-trello-card.yml @@ -0,0 +1,30 @@ +# A new trello card was created +workflow: + id: notify-new-trello-card + name: Notify on new Trello card + description: Send a slack notification when a new trello card is created + triggers: + - type: interval + value: 60 + steps: + - name: trello-cards + provider: + type: trello + config: "{{ providers.trello-provider }}" + with: + board_id: hIjQQX9S + filter: "createCard" + condition: + - name: assert-condition + type: assert + assert: "{{ state.notify-new-trello-card.-1.alert_context.alert_steps_context.trello-cards.results.number_of_cards }} >= 
{{steps.trello-cards.results.number_of_cards }}" + actions: + - name: trigger-slack + provider: + type: slack + config: "{{ providers.slack-demo }}" + with: + channel: some-channel-that-youll-decide-later + # Message is always mandatory + message: > + A new card was created diff --git a/examples/workflows/ntfy_basic.yml b/examples/workflows/ntfy_basic.yml index 64b762771f..db625559ae 100644 --- a/examples/workflows/ntfy_basic.yml +++ b/examples/workflows/ntfy_basic.yml @@ -1,6 +1,7 @@ workflow: - id: ntfy-example - description: ntfy-example + id: ntfy-notification-sender + name: Ntfy Notification Sender + description: Sends notifications to Ntfy topics with customizable messages for basic alerting and communication. triggers: - type: manual actions: diff --git a/examples/workflows/opensearchserverless_basic.yml b/examples/workflows/opensearchserverless_basic.yml new file mode 100644 index 0000000000..401a7cb163 --- /dev/null +++ b/examples/workflows/opensearchserverless_basic.yml @@ -0,0 +1,28 @@ +workflow: + id: opensearch-serverless-create-query + name: OSS Create Query Docs + description: Retrieves all the documents from index keep, and uploads a document to opensearch in index keep. 
+ disabled: false + triggers: + - type: manual + steps: + # This step will fail if there is no index called keep + - name: query-index + provider: + type: opensearchserverless + config: "{{ providers.opensearchserverless }}" + with: + query: + query: + match_all: {} + index: keep + actions: + - name: create-doc + provider: + type: opensearchserverless + config: "{{ providers.opensearchserverless }}" + with: + index: keep + document: + message: Keep test doc + doc_id: doc_1 diff --git a/examples/workflows/openshift_basic.yml b/examples/workflows/openshift_basic.yml new file mode 100644 index 0000000000..fd4e538fa0 --- /dev/null +++ b/examples/workflows/openshift_basic.yml @@ -0,0 +1,58 @@ +workflow: + id: openshift-basic-monitoring + name: OpenShift Basic Monitoring + description: Simple OpenShift monitoring workflow that gets cluster status and pod information + triggers: + - type: manual + steps: + # Get all OpenShift projects + - name: get-projects + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_projects + + # Get all pods + - name: get-pods + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_pods + + # Get OpenShift routes + - name: get-routes + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_routes + + actions: + # Display cluster summary + - name: display-cluster-summary + provider: + type: console + with: + message: | + 🔍 OpenShift Cluster Summary: + - Projects: {{ steps.get-projects.results | length }} + - Total Pods: {{ steps.get-pods.results | length }} + - Routes: {{ steps.get-routes.results | length }} + + # Show pod status for each namespace + - name: display-pod-status + foreach: "{{ steps.get-pods.results }}" + provider: + type: console + with: + message: "Pod: {{ foreach.value.metadata.name }} | Namespace: {{ foreach.value.metadata.namespace }} | Status: {{ foreach.value.status.phase }}" + + # List all projects + - 
name: list-projects + foreach: "{{ steps.get-projects.results }}" + provider: + type: console + with: + message: "Project: {{ foreach.value.metadata.name }} | Status: {{ foreach.value.status.phase | default('Active') }}" \ No newline at end of file diff --git a/examples/workflows/openshift_monitoring_and_remediation.yml b/examples/workflows/openshift_monitoring_and_remediation.yml new file mode 100644 index 0000000000..7611387b23 --- /dev/null +++ b/examples/workflows/openshift_monitoring_and_remediation.yml @@ -0,0 +1,229 @@ +workflow: + id: openshift-monitoring-and-remediation + name: OpenShift Monitoring and Remediation + description: | + Comprehensive OpenShift monitoring workflow that demonstrates: + - Getting cluster information (projects, pods, routes, deployment configs) + - Monitoring pod health and events + - Automatic remediation actions (restart pods, scale deployments) + - Alert-driven workflows for OpenShift clusters + triggers: + - type: manual + - type: alert + filters: + - key: source + value: openshift + - key: severity + value: critical + steps: + # Get all OpenShift projects + - name: get-projects + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_projects + + # Get all pods across namespaces + - name: get-all-pods + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_pods + + # Get deployment configs + - name: get-deployment-configs + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_deploymentconfigs + + # Get routes + - name: get-routes + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_routes + + # Get node pressure conditions + - name: get-node-pressure + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_node_pressure + + # Get events for a specific namespace (if alert provides namespace) + - name: get-events + if: "{{ 
alert.namespace }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_events + namespace: "{{ alert.namespace }}" + + # Get pod logs for failing pods (if alert provides pod name) + - name: get-pod-logs + if: "{{ alert.pod_name and alert.namespace }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_logs + namespace: "{{ alert.namespace }}" + pod_name: "{{ alert.pod_name }}" + tail_lines: 50 + + actions: + # Report cluster overview + - name: report-cluster-overview + provider: + type: console + with: + message: | + 🔍 OpenShift Cluster Overview: + - Projects: {{ steps.get-projects.results | length }} + - Total Pods: {{ steps.get-all-pods.results | length }} + - Deployment Configs: {{ steps.get-deployment-configs.results | length }} + - Routes: {{ steps.get-routes.results | length }} + - Node Pressure Issues: {{ steps.get-node-pressure.results | selectattr('conditions', 'ne', []) | list | length }} + + # Alert on failing pods + - name: alert-failing-pods + foreach: "{{ steps.get-all-pods.results | selectattr('status.phase', 'ne', 'Running') | selectattr('status.phase', 'ne', 'Succeeded') }}" + provider: + type: console + with: + message: | + ⚠️ Pod Issue Detected: + - Pod: {{ foreach.value.metadata.name }} + - Namespace: {{ foreach.value.metadata.namespace }} + - Status: {{ foreach.value.status.phase }} + - Node: {{ foreach.value.spec.nodeName }} + + # Restart failing pods automatically (CrashLoopBackOff, Failed) + - name: restart-failed-pods + foreach: "{{ steps.get-all-pods.results | selectattr('status.phase', 'in', ['CrashLoopBackOff', 'Failed']) }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + action: restart_pod + namespace: "{{ foreach.value.metadata.namespace }}" + pod_name: "{{ foreach.value.metadata.name }}" + message: "Auto-restarting failed pod {{ foreach.value.metadata.name }}" + + # Scale up deployment if alert indicates high load 
+ - name: scale-deployment-on-high-load + if: "{{ alert.deployment_name and alert.namespace and alert.scale_up }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + action: scale_deployment + namespace: "{{ alert.namespace }}" + deployment_name: "{{ alert.deployment_name }}" + replicas: "{{ alert.target_replicas | default(3) }}" + + # Scale up deployment config if specified + - name: scale-deploymentconfig-on-demand + if: "{{ alert.deploymentconfig_name and alert.namespace and alert.scale_up }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + action: scale_deploymentconfig + namespace: "{{ alert.namespace }}" + deploymentconfig_name: "{{ alert.deploymentconfig_name }}" + replicas: "{{ alert.target_replicas | default(2) }}" + + # Restart deployment on critical alerts + - name: restart-deployment-on-critical-alert + if: "{{ alert.severity == 'critical' and alert.deployment_name and alert.namespace }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + action: rollout_restart + kind: "deployment" + name: "{{ alert.deployment_name }}" + namespace: "{{ alert.namespace }}" + + # Restart deployment config on critical alerts + - name: restart-deploymentconfig-on-critical-alert + if: "{{ alert.severity == 'critical' and alert.deploymentconfig_name and alert.namespace }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + action: rollout_restart + kind: "deploymentconfig" + name: "{{ alert.deploymentconfig_name }}" + namespace: "{{ alert.namespace }}" + + # Send notification with detailed information + - name: send-notification + if: "{{ alert }}" + provider: + type: slack + config: "{{ providers.slack }}" + with: + message: | + 🚨 OpenShift Alert: {{ alert.name }} + + 📊 Cluster Status: + • Projects: {{ steps.get-projects.results | length }} + • Total Pods: {{ steps.get-all-pods.results | length }} + • Failing Pods: {{ steps.get-all-pods.results | 
selectattr('status.phase', 'ne', 'Running') | selectattr('status.phase', 'ne', 'Succeeded') | list | length }} + + 🔍 Alert Details: + • Severity: {{ alert.severity }} + • Source: {{ alert.source }} + • Namespace: {{ alert.namespace | default('N/A') }} + • Pod: {{ alert.pod_name | default('N/A') }} + + 🛠️ Actions Taken: + {% if alert.deployment_name and alert.scale_up %}• Scaled deployment {{ alert.deployment_name }} to {{ alert.target_replicas | default(3) }} replicas{% endif %} + {% if alert.deploymentconfig_name and alert.scale_up %}• Scaled DeploymentConfig {{ alert.deploymentconfig_name }} to {{ alert.target_replicas | default(2) }} replicas{% endif %} + {% if alert.severity == 'critical' and (alert.deployment_name or alert.deploymentconfig_name) %}• Performed rollout restart{% endif %} + +# Example alert payloads to test this workflow: + +# Manual trigger for cluster overview: +# No additional data needed + +# High load scaling scenario: +# { +# "name": "High CPU Usage", +# "severity": "warning", +# "source": "openshift", +# "namespace": "production", +# "deployment_name": "web-app", +# "scale_up": true, +# "target_replicas": 5 +# } + +# Critical pod failure: +# { +# "name": "Pod CrashLoopBackOff", +# "severity": "critical", +# "source": "openshift", +# "namespace": "production", +# "pod_name": "web-app-123-abc", +# "deployment_name": "web-app" +# } + +# DeploymentConfig scaling: +# { +# "name": "Scale DeploymentConfig", +# "severity": "warning", +# "source": "openshift", +# "namespace": "staging", +# "deploymentconfig_name": "api-server", +# "scale_up": true, +# "target_replicas": 3 +# } \ No newline at end of file diff --git a/examples/workflows/openshift_pod_restart.yml b/examples/workflows/openshift_pod_restart.yml new file mode 100644 index 0000000000..c73e3de079 --- /dev/null +++ b/examples/workflows/openshift_pod_restart.yml @@ -0,0 +1,159 @@ +workflow: + id: openshift-pod-restart-remediation + name: OpenShift Pod Restart Remediation + description: 
Automatically restart failing pods and scale deployments based on alerts or manual triggers + triggers: + - type: manual + - type: alert + filters: + - key: source + value: openshift + - key: pod_status + value: CrashLoopBackOff + steps: + # Get pod details for a specific namespace + - name: get-namespace-pods + if: "{{ alert.namespace }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_pods + namespace: "{{ alert.namespace }}" + + # Get pod logs if specific pod is mentioned + - name: get-failing-pod-logs + if: "{{ alert.pod_name and alert.namespace }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_logs + namespace: "{{ alert.namespace }}" + pod_name: "{{ alert.pod_name }}" + tail_lines: 100 + + # Get events for the namespace to understand issues + - name: get-namespace-events + if: "{{ alert.namespace }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + command_type: get_events + namespace: "{{ alert.namespace }}" + + actions: + # Restart specific pod if mentioned in alert + - name: restart-specific-pod + if: "{{ alert.pod_name and alert.namespace }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + action: restart_pod + namespace: "{{ alert.namespace }}" + pod_name: "{{ alert.pod_name }}" + message: "Restarting pod due to {{ alert.pod_status | default('failure') }}" + + # Scale deployment if replica count is specified + - name: scale-deployment + if: "{{ alert.deployment_name and alert.namespace and alert.replicas }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + action: scale_deployment + namespace: "{{ alert.namespace }}" + deployment_name: "{{ alert.deployment_name }}" + replicas: "{{ alert.replicas }}" + + # Scale deployment config if specified + - name: scale-deploymentconfig + if: "{{ alert.deploymentconfig_name and alert.namespace and alert.replicas }}" + provider: + 
type: openshift + config: "{{ providers.openshift }}" + with: + action: scale_deploymentconfig + namespace: "{{ alert.namespace }}" + deploymentconfig_name: "{{ alert.deploymentconfig_name }}" + replicas: "{{ alert.replicas }}" + + # Rollout restart deployment + - name: rollout-restart-deployment + if: "{{ alert.deployment_name and alert.namespace and alert.restart_deployment }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + action: rollout_restart + kind: "deployment" + name: "{{ alert.deployment_name }}" + namespace: "{{ alert.namespace }}" + + # Rollout restart deployment config + - name: rollout-restart-deploymentconfig + if: "{{ alert.deploymentconfig_name and alert.namespace and alert.restart_deployment }}" + provider: + type: openshift + config: "{{ providers.openshift }}" + with: + action: rollout_restart + kind: "deploymentconfig" + name: "{{ alert.deploymentconfig_name }}" + namespace: "{{ alert.namespace }}" + + # Report remediation actions taken + - name: report-actions + provider: + type: console + with: + message: | + 🔧 OpenShift Remediation Actions Completed: + {% if alert.pod_name %} + - Restarted pod: {{ alert.pod_name }} in {{ alert.namespace }} + {% endif %} + {% if alert.deployment_name and alert.replicas %} + - Scaled deployment {{ alert.deployment_name }} to {{ alert.replicas }} replicas + {% endif %} + {% if alert.deploymentconfig_name and alert.replicas %} + - Scaled DeploymentConfig {{ alert.deploymentconfig_name }} to {{ alert.replicas }} replicas + {% endif %} + {% if alert.restart_deployment %} + - Performed rollout restart on {{ alert.deployment_name or alert.deploymentconfig_name }} + {% endif %} + +# Example alert payloads: + +# Restart specific pod: +# { +# "source": "openshift", +# "namespace": "production", +# "pod_name": "web-app-789-xyz", +# "pod_status": "CrashLoopBackOff" +# } + +# Scale deployment: +# { +# "source": "openshift", +# "namespace": "production", +# "deployment_name": "web-app", +# 
"replicas": 5 +# } + +# Scale deployment config: +# { +# "source": "openshift", +# "namespace": "staging", +# "deploymentconfig_name": "api-server", +# "replicas": 3 +# } + +# Rollout restart deployment: +# { +# "source": "openshift", +# "namespace": "production", +# "deployment_name": "web-app", +# "restart_deployment": true +# } \ No newline at end of file diff --git a/examples/workflows/opsgenie-close-alert.yml b/examples/workflows/opsgenie-close-alert.yml new file mode 100644 index 0000000000..7fe00495a3 --- /dev/null +++ b/examples/workflows/opsgenie-close-alert.yml @@ -0,0 +1,22 @@ +workflow: + id: opsgenie-alert-closer + name: OpsGenie Alert Closer + description: Closes OpsGenie alerts for resolved Coralogix alerts. + triggers: + - type: manual + - type: alert + filters: + - key: source + value: coralogix + - key: status + value: resolved + actions: + - name: close-alert + # run only if we have an opsgenie alert id + if: "'{{ alert.opsgenie_alert_id }}'" + provider: + config: "{{ providers.opsgenie }}" + type: opsgenie + with: + type: close_alert + alert_id: "{{ alert.opsgenie_alert_id }}" diff --git a/examples/workflows/opsgenie-create-alert-cel.yml b/examples/workflows/opsgenie-create-alert-cel.yml new file mode 100644 index 0000000000..e521a7eff3 --- /dev/null +++ b/examples/workflows/opsgenie-create-alert-cel.yml @@ -0,0 +1,22 @@ +workflow: + id: opsgenie-critical-alert-creator-cel + name: OpsGenie Critical Alert Creator (CEL) + description: Creates OpsGenie alerts for critical Coralogix issues with team assignment and alert enrichment tracking using CEL filters. 
+ triggers: + - type: manual + - type: alert + cel: source.contains("coralogix") && severity == "critical" + actions: + - name: create-alert + if: "not '{{ alert.opsgenie_alert_id }}'" + provider: + config: "{{ providers.opsgenie }}" + type: opsgenie + with: + message: "{{ alert.name }}" + responders: + - name: "{{ alert.team }}" + type: team + enrich_alert: + - key: opsgenie_alert_id + value: results.alertId diff --git a/examples/workflows/opsgenie-create-alert.yml b/examples/workflows/opsgenie-create-alert.yml new file mode 100644 index 0000000000..a25b3b5948 --- /dev/null +++ b/examples/workflows/opsgenie-create-alert.yml @@ -0,0 +1,26 @@ +workflow: + id: opsgenie-critical-alert-creator + name: OpsGenie Critical Alert Creator + description: Creates OpsGenie alerts for critical Coralogix issues with team assignment and alert enrichment tracking. + triggers: + - type: manual + - type: alert + filters: + - key: source + value: coralogix + - key: severity + value: critical + actions: + - name: create-alert + if: "not '{{ alert.opsgenie_alert_id }}'" + provider: + type: opsgenie + config: "{{ providers.opsgenie }}" + with: + message: "{{ alert.name }}" + responders: + - name: "{{ alert.team }}" + type: team + enrich_alert: + - key: opsgenie_alert_id + value: results.alertId diff --git a/examples/workflows/opsgenie_open_alerts.yml b/examples/workflows/opsgenie_open_alerts.yml index a861981b9f..b110c30bd6 100644 --- a/examples/workflows/opsgenie_open_alerts.yml +++ b/examples/workflows/opsgenie_open_alerts.yml @@ -1,6 +1,10 @@ -alert: - id: opsgenie-get-open-alerts - description: Get open alerts from Opsgenie +workflow: + id: opsgenie-alert-monitor + name: OpsGenie Alert Monitor + description: Monitors open alerts in OpsGenie and sends detailed Slack notifications with priority levels and timestamps. 
+ triggers: + - type: interval + value: 60 steps: - name: get-open-alerts provider: @@ -19,10 +23,10 @@ alert: message: > Opsgenie has {{ steps.get-open-alerts.results.number_of_alerts }} open alerts blocks: - - type: section - text: - type: mrkdwn - text: |- - {{#steps.get-open-alerts.results.alerts}} - - Alert Id: {{id}} | Priortiy: {{priority}} | Created at: {{created_at}} | Message: {{message}} - {{/steps.get-open-alerts.results.alerts}} + - type: section + text: + type: mrkdwn + text: |- + {{#steps.get-open-alerts.results.alerts}} + - Alert Id: {{id}} | Priority: {{priority}} | Created at: {{created_at}} | Message: {{message}} + {{/steps.get-open-alerts.results.alerts}} diff --git a/examples/workflows/pagerduty.yml b/examples/workflows/pagerduty.yml new file mode 100644 index 0000000000..4b54afe70f --- /dev/null +++ b/examples/workflows/pagerduty.yml @@ -0,0 +1,61 @@ +workflow: + id: pagerduty-example + name: PagerDuty workflow example + description: retrieve PagerDuty incident, create event and incident + triggers: + - type: manual + steps: + - name: check-incident-exist-pd-fingerprint + if: "{{ incident.fingerprint }} != ''" + provider: + type: pagerduty + config: "{{ providers.PagerDuty }}" + with: + incident_id: "{{ incident.fingerprint }}" + - name: check-incident-exist-pd-incident-key-dedup-key + provider: + type: pagerduty + config: "{{ providers.PagerDuty }}" + with: + incident_key: "7f3baa50-e7ef-4891-bd4a-d1ee310dff8f" + actions: + - name: pd-create-event + provider: + type: pagerduty + config: "{{ providers.PagerDuty }}" + with: + routing_key: 'your_routing_key' # optional, otherwise it will take from provider configuration$ + severity: critical + source: keep + component: job_service + group: job + class: job + custom_details: + environment: 'production' + url: 'https://keep.example.org' + links: + - href: "https://keep.example.com/" + text: "View in Keep" + dedup: "{{ incident.id }}" + event_type: trigger + title: "TestEvent" + - name:
pd-create-inc + provider: + type: pagerduty + config: "{{ providers.PagerDuty }}" + with: + source: keep + alert_body: + details: + client: keep + client_url: "https://keep.example.com/incidents/{{ incident.id }}" + description: "{{ incident.user_summary }}" + alert_count: "{{ incident.alerts_count }}" + alerts: "{{ incident.alerts }}" + type: incident_body + dedup: "{{ incident.id }}" + status: "triggered" + service_id: "{{ incident.service_id }}" + requester: email@example.com + severity: "{{ incident.severity }}" + title: "{{ incident.user_generated_name }}" diff --git a/examples/workflows/pattern-matching-cel.yml b/examples/workflows/pattern-matching-cel.yml new file mode 100644 index 0000000000..8c2f6d505e --- /dev/null +++ b/examples/workflows/pattern-matching-cel.yml @@ -0,0 +1,13 @@ +workflow: + id: pattern-matching-monitor-cel + name: Pattern Matching Monitor (CEL) + description: Monitors alerts with pattern matching using CEL filters. + triggers: + - type: alert + cel: name.contains("error") || name.contains("failure") + actions: + - name: notify + provider: + type: console + with: + message: "Error or failure detected: {{ alert.name }}" diff --git a/examples/workflows/permissions_example.yml b/examples/workflows/permissions_example.yml new file mode 100644 index 0000000000..d3c04c3be3 --- /dev/null +++ b/examples/workflows/permissions_example.yml @@ -0,0 +1,36 @@ +workflow: + id: permissions-example + name: Permissions Example + description: "Demonstrates how to restrict workflow execution using permissions" + + # Restrict execution to admin role and specific users + permissions: + - admin + - sarah.smith@example.com # noc user + + triggers: + - type: manual + + steps: + - name: get-system-status + provider: + type: http + with: + url: "https://api.example.com/status" + method: GET + + actions: + - name: send-status-notification + provider: + type: slack + config: "{{ providers.slack-operations }}" + with: + channel: "#operations" + message: | + 
*Sensitive System Status Check* + + Status: {{ steps.get-system-status.results.status }} + Health: {{ steps.get-system-status.results.health }} + Last Updated: {{ steps.get-system-status.results.last_updated }} + + _This workflow has restricted permissions and can only be executed by authorized users._ diff --git a/examples/workflows/planner_basic.yml b/examples/workflows/planner_basic.yml index ab460af24e..34e8b32d4d 100644 --- a/examples/workflows/planner_basic.yml +++ b/examples/workflows/planner_basic.yml @@ -1,6 +1,7 @@ workflow: - id: planner-demo - description: Create a task in planner. + id: planner-task-creator + name: Microsoft Planner Task Creator + description: Creates tasks in Microsoft Planner with retry capabilities for reliable task creation. triggers: - type: interval value: 15 @@ -12,7 +13,7 @@ workflow: with: title: "Keep HQ Task1" plan_id: "tAtCor_XPEmqTzVqTigCycgABz0K" - on-failure: - retry: - count: 2 - interval: 2 \ No newline at end of file + on-failure: + retry: + count: 2 + interval: 2 diff --git a/examples/workflows/posthog_example.yml b/examples/workflows/posthog_example.yml new file mode 100644 index 0000000000..f932189060 --- /dev/null +++ b/examples/workflows/posthog_example.yml @@ -0,0 +1,47 @@ +workflow: + id: posthog-domain-tracker + name: PostHog Domain Tracker + description: Tracks domains from PostHog session recordings over the last 24 hours and sends a summary to Slack. 
+ triggers: + - type: manual + - type: interval + value: 86400 # Run daily (in seconds) + steps: + - name: get-posthog-domains + provider: + config: "{{ providers.posthog }}" + type: posthog + with: + query_type: session_recording_domains + hours: 24 + limit: 500 + actions: + - name: send-to-slack + provider: + config: "{{ providers.slack }}" + type: slack + with: + blocks: + - type: header + text: + type: plain_text + text: "PostHog Session Recording Domains (Last 24 Hours)" + emoji: true + - type: section + text: + type: mrkdwn + text: "Found *{{ steps.get-posthog-domains.results.unique_domains_count }}* unique domains across *{{ steps.get-posthog-domains.results.total_domains_found }}* occurrences" + - type: divider + - type: section + text: + type: mrkdwn + text: "*Domains:*" + - type: section + text: + type: mrkdwn + text: "{{#steps.get-posthog-domains.results.unique_domains}} + + • *{{ . }}* + + {{/steps.get-posthog-domains.results.unique_domains}}" + - type: divider diff --git a/examples/workflows/query-databend.yml b/examples/workflows/query-databend.yml new file mode 100644 index 0000000000..2ad333e1e5 --- /dev/null +++ b/examples/workflows/query-databend.yml @@ -0,0 +1,18 @@ +workflow: + id: databend-performance-monitor + name: Databend Performance Monitor + description: Executes performance analysis queries on Databend for large dataset operations.
+ disabled: false + triggers: + - type: manual + consts: {} + owners: [] + services: [] + steps: + - name: databend-step + provider: + type: databend + config: "{{ providers.databend }}" + with: + query: SELECT avg(number) FROM numbers(100000000) + actions: [] diff --git a/examples/workflows/query_clickhouse.yml b/examples/workflows/query_clickhouse.yml new file mode 100644 index 0000000000..2c5aa23826 --- /dev/null +++ b/examples/workflows/query_clickhouse.yml @@ -0,0 +1,33 @@ +workflow: + id: clickhouse-error-monitor + name: ClickHouse Error Monitor + description: Monitors ClickHouse logs for errors and sends notifications through both Ntfy and Slack channels. + triggers: + - type: manual + +steps: + - name: clickhouse-step + provider: + config: "{{ providers.clickhouse }}" + type: clickhouse + with: + query: SELECT * FROM logs_table ORDER BY timestamp DESC LIMIT 1; + single_row: "True" + +actions: + - name: ntfy-action + if: "'{{ steps.clickhouse-step.results.level }}' == 'ERROR'" + provider: + config: "{{ providers.ntfy }}" + type: ntfy + with: + message: "Error in clickhouse logs_table: {{ steps.clickhouse-step.results.level }}" + topic: clickhouse + + - name: slack-action + if: "'{{ steps.clickhouse-step.results.level }}' == 'ERROR'" + provider: + config: "{{ providers.slack }}" + type: slack + with: + message: "Error in clickhouse logs_table: {{ steps.clickhouse-step.results.level }}" diff --git a/examples/workflows/query_grafana_loki.yaml b/examples/workflows/query_grafana_loki.yaml new file mode 100644 index 0000000000..1094fce582 --- /dev/null +++ b/examples/workflows/query_grafana_loki.yaml @@ -0,0 +1,20 @@ +workflow: + id: loki-log-analyzer + name: Loki Log Analyzer + description: Analyzes log rates from Grafana Loki with customizable queries and time ranges for monitoring log patterns. 
+ disabled: false + triggers: + - type: manual + consts: {} + owners: [] + services: [] + steps: + - name: grafana_loki-step + provider: + type: grafana_loki + config: "{{ providers.loki }}" + with: + query: sum(rate({job="varlogs"}[10m])) by (level) + queryType: query_range + step: 300 + actions: [] diff --git a/examples/workflows/query_mongodb.yaml b/examples/workflows/query_mongodb.yaml new file mode 100644 index 0000000000..468c924184 --- /dev/null +++ b/examples/workflows/query_mongodb.yaml @@ -0,0 +1,21 @@ +workflow: + id: mongodb-document-finder + name: MongoDB Document Finder + description: Executes targeted MongoDB queries with filters to retrieve specific documents from collections. + triggers: + - type: manual + steps: + - name: mongodb-step + provider: + config: "{{ providers.mongo }}" + type: mongodb + with: + # Please note that argument order is important for MongoDB queries. + query: | + { + "find": "mycollection", + "filter": { + "name": "First Document" + } + } + single_row: true diff --git a/examples/workflows/query_victorialogs.yaml b/examples/workflows/query_victorialogs.yaml new file mode 100644 index 0000000000..b580f1c983 --- /dev/null +++ b/examples/workflows/query_victorialogs.yaml @@ -0,0 +1,19 @@ +workflow: + id: victorialogs-stats-analyzer + name: VictoriaLogs Stats Analyzer + description: Analyzes VictoriaLogs data with statistical queries to track log level distributions and patterns. 
+ disabled: false + triggers: + - type: manual + consts: {} + owners: [] + services: [] + steps: + - name: victorialogs-step + provider: + config: "{{ providers.logs }}" + type: victorialogs + with: + query: "* | stats by (level) count(*)" + queryType: stats_query_range + actions: [] diff --git a/examples/workflows/query_victoriametrics.yml b/examples/workflows/query_victoriametrics.yml new file mode 100644 index 0000000000..3633d1fc88 --- /dev/null +++ b/examples/workflows/query_victoriametrics.yml @@ -0,0 +1,46 @@ +workflow: + id: victoriametrics-threshold-monitor + name: VictoriaMetrics Threshold Monitor + description: Monitors VictoriaMetrics metrics with threshold-based alerts, sending notifications to both Slack and Ntfy. + triggers: + - type: manual + steps: + - name: victoriametrics-step + provider: + config: "{{ providers.victoriametrics }}" + type: victoriametrics + with: + query: avg(rate(process_cpu_seconds_total)) + queryType: query + + actions: + - name: trigger-slack1 + condition: + - name: threshold-condition + type: threshold + value: "{{ steps.victoriametrics-step.results.data.result.0.value.1 }}" + compare_to: 0.0050 + alias: A + compare_type: gt + provider: + type: slack + config: "{{ providers.slack }}" + with: + message: "Result: {{ steps.victoriametrics-step.results.data.result.0.value.1 }} is greater than 0.0040! 🚨" + + - name: trigger-slack2 + if: "{{ A }}" + provider: + type: slack + config: "{{ providers.slack }}" + with: + message: "Result: {{ steps.victoriametrics-step.results.data.result.0.value.1 }} is greater than 0.0040! 🚨" + + - name: trigger-ntfy + if: "{{ A }}" + provider: + type: ntfy + config: "{{ providers.ntfy }}" + with: + message: "Result: {{ steps.victoriametrics-step.results.data.result.0.value.1 }} is greater than 0.0040! 
🚨" + topic: ezhil diff --git a/examples/workflows/raw_sql_query_datetime.yml b/examples/workflows/raw_sql_query_datetime.yml index 6f3819b10b..535b5bcab6 100644 --- a/examples/workflows/raw_sql_query_datetime.yml +++ b/examples/workflows/raw_sql_query_datetime.yml @@ -1,7 +1,11 @@ # Alert if a result queried from the DB is above a certain thershold. -alert: - id: raw-sql-query - description: Monitor that time difference is no more than 1 hour +workflow: + id: mysql-datetime-monitor + name: MySQL Datetime Monitor + description: Monitors time differences in MySQL database entries and alerts via Slack when exceeding one hour threshold. + triggers: + - type: interval + value: 300 # every 5 minutes steps: - name: get-max-datetime provider: @@ -13,14 +17,14 @@ alert: actions: - name: trigger-slack condition: - - name: threshold-condition - type: threshold - # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours - # utcnow() returns the local machine datetime in UTC - # to_utc() converts a datetime to UTC - value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}")) - compare_to: 1 # hours - compare_type: gt # greater than + - name: threshold-condition + type: threshold + # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours + # utcnow() returns the local machine datetime in UTC + # to_utc() converts a datetime to UTC + value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}")) + compare_to: 1 # hours + compare_type: gt # greater than provider: type: slack config: " {{ providers.slack-demo }} " diff --git a/examples/workflows/resolve_old_alerts.yml b/examples/workflows/resolve_old_alerts.yml new file mode 100644 index 0000000000..d8fca39104 --- /dev/null +++ b/examples/workflows/resolve_old_alerts.yml @@ -0,0 +1,27 @@ +workflow: + id: alert-auto-resolver + name: Alert Auto-Resolver + description: Automatically resolves stale alerts that haven't been updated in over an hour to 
maintain alert hygiene. + triggers: + - type: manual + - type: interval + value: 60 + steps: + # get the alerts from keep + - name: get-alerts + provider: + type: keep + with: + version: 2 + filter: "status == 'firing'" + actions: + - name: resolve-alerts + foreach: " {{ steps.get-alerts.results }} " + if: "keep.to_timestamp('{{ foreach.value.lastReceived }}') < keep.utcnowtimestamp() - 3600" + provider: + type: mock + with: + enrich_alert: + - key: status + value: resolved + disposable: true diff --git a/examples/workflows/retrieve_cloudwatch_logs.yaml b/examples/workflows/retrieve_cloudwatch_logs.yaml new file mode 100644 index 0000000000..6723dfcf71 --- /dev/null +++ b/examples/workflows/retrieve_cloudwatch_logs.yaml @@ -0,0 +1,26 @@ +workflow: + id: cloudwatch-log-retriever + name: CloudWatch Log Retriever + description: Retrieves and analyzes CloudWatch logs with custom queries, filtering, and alert generation capabilities. + triggers: + - type: manual + +steps: + - name: cw-logs + provider: + config: "{{ providers.cloudwatch }}" + type: cloudwatch + with: + log_group: "meow_logs" + query: "fields @message | sort @timestamp desc | limit 20" + hours: 12 + remove_ptr_from_results: true # We need only @message, no need for @ptr + +actions: + - name: raise-alert + if: keep.len( {{ steps.cw-logs.results }} ) > 0 + provider: + type: keep + with: + alert: + name: "CW logs found!" diff --git a/examples/workflows/run-github-workflow.yaml b/examples/workflows/run-github-workflow.yaml new file mode 100644 index 0000000000..c6d0124a68 --- /dev/null +++ b/examples/workflows/run-github-workflow.yaml @@ -0,0 +1,19 @@ +workflow: + id: run-github-workflow + name: Run GitHub Workflow + description: Triggers GitHub Actions workflows with customizable inputs for automated documentation testing. 
+ triggers: + - type: manual + actions: + - name: run-gh-action + provider: + config: "{{ providers.github }}" + type: github + with: + run_action: true + repo_owner: keephq + repo_name: keep + workflow: test-docs.yml + inputs: + input1: value1 + input2: value2 diff --git a/examples/workflows/send-message-telegram-with-htmlmd.yaml b/examples/workflows/send-message-telegram-with-htmlmd.yaml new file mode 100644 index 0000000000..592f3273bd --- /dev/null +++ b/examples/workflows/send-message-telegram-with-htmlmd.yaml @@ -0,0 +1,31 @@ +workflow: + id: send-message-telegram-with-htmlmd + name: telegram + description: telegram + disabled: false + triggers: + - type: manual + consts: {} + owners: [] + services: [] + steps: [] + actions: + # Telegram only supports limited formatting. Refer https://core.telegram.org/bots/api#formatting-options + - name: telegram-action + provider: + type: telegram + config: "{{ providers.telegram }}" + with: + chat_id: 1072776973 + message: "This is html bold italic bold italic bold strikethrough italic bold strikethrough spoiler underline italic bold bold" + # Uses HTML + parse_mode: html + - name: telegram-action + provider: + type: telegram + config: "{{ providers.telegram }}" + with: + chat_id: 1072776973 + message: "This is markdown *bold _italic bold ~italic bold strikethrough ||italic bold strikethrough spoiler||~ __underline italic bold___ bold*" + # Uses MarkdownV2 + parse_mode: markdown diff --git a/examples/workflows/send_slack_message_on_failure.yaml b/examples/workflows/send_slack_message_on_failure.yaml new file mode 100644 index 0000000000..5eb86c06e2 --- /dev/null +++ b/examples/workflows/send_slack_message_on_failure.yaml @@ -0,0 +1,32 @@ +workflow: + id: send-slack-message-on-failure + name: Get alert root cause from OpenAI, notify if workflow fails + description: Get alert root cause from OpenAI, notify if workflow fails + triggers: + - type: alert + cel: alert.severity == "critical" + on-failure: + provider: + type: 
slack + config: "{{ providers.slack }}" + with: + channel: "" + # message will be injected from the workflow engine + # e.g. "Workflow failed with error: " + steps: + - name: openai-step + provider: + config: "{{ providers.openai }}" + type: openai + with: + prompt: | + You are a very talented engineer that receives critical alert and reports back the root + cause analysis. Here is the context: keep.json_dumps({{alert}}) (it is a JSON of the alert). + In your answer, also provide the reason why you think it is the root cause and specify what your certainty level is that it is the root cause. (between 1-10, where 1 is low and 10 is high) + actions: + - name: slack-action + provider: + config: "{{ providers.slack }}" + type: slack + with: + message: "{{steps.openai-step.results}}" diff --git a/examples/workflows/send_smtp_email.yml b/examples/workflows/send_smtp_email.yml new file mode 100644 index 0000000000..fe2e9839f0 --- /dev/null +++ b/examples/workflows/send_smtp_email.yml @@ -0,0 +1,19 @@ +workflow: + id: smtp-email-sender + name: SMTP Email Sender + description: Sends customized email notifications through SMTP with configurable sender, recipient, and message content. + triggers: + - type: manual + + actions: + - name: send-email + provider: + type: smtp + config: "{{ providers.smtp }}" + with: + from_email: "your_email@gmail.com" + from_name: "Workflow user" + to_email: + - "matvey@keephq.dev" + subject: "Hello from Keep workflow!" + body: "Hello! This is a test email from Keep workflow." diff --git a/examples/workflows/send_smtp_html_email.yml b/examples/workflows/send_smtp_html_email.yml new file mode 100644 index 0000000000..7855128b48 --- /dev/null +++ b/examples/workflows/send_smtp_html_email.yml @@ -0,0 +1,47 @@ +workflow: + id: smtp-html-email-sender + name: SMTP HTML Email Sender + description: Sends HTML-formatted email notifications through SMTP with customizable content and styling. 
+ triggers: + - type: manual + + actions: + - name: send-html-email + provider: + type: smtp + config: "{{ providers.smtp }}" + with: + from_email: "your_email@gmail.com" + from_name: "Keep Workflow" + to_email: + - "recipient1@example.com" + - "recipient2@example.com" + subject: "Keep Alert Notification" + html: | + + +
+

Alert from Keep

+

This is an example of an HTML-formatted email sent via SMTP provider.

+ + + + + + + + + + + + + +
Alert TypeSystem Health Check
Status + ✓ Operational +
Timestamp{{ utcnow }}
+
+

Note: This email demonstrates the HTML formatting capabilities of the SMTP provider.

+
+
+ + \ No newline at end of file diff --git a/examples/workflows/sendgrid_basic.yml b/examples/workflows/sendgrid_basic.yml index 9316429c5a..7a1d86d3af 100644 --- a/examples/workflows/sendgrid_basic.yml +++ b/examples/workflows/sendgrid_basic.yml @@ -1,6 +1,7 @@ workflow: - id: sendgrid-basic-demo - description: send an email + id: sendgrid-notification-sender + name: SendGrid Notification Sender + description: Sends HTML-formatted email notifications to multiple recipients using SendGrid's email service. triggers: - type: manual actions: @@ -9,6 +10,8 @@ workflow: type: sendgrid config: " {{ providers.Sendgrid }} " with: - to: "youremail@gmail.com" - subject: "Hello from Keep!" - html: "Test with HTML" + to: + - "youremail@gmail.com" + - "youranotheremail@gmail.com" + subject: "Hello from Keep!" + html: "Test with HTML" diff --git a/examples/workflows/service-error-rate-monitor-datadog.yml b/examples/workflows/service-error-rate-monitor-datadog.yml new file mode 100644 index 0000000000..e9365c9e17 --- /dev/null +++ b/examples/workflows/service-error-rate-monitor-datadog.yml @@ -0,0 +1,37 @@ +# AUTO GENERATED +# Alert that was created with Keep semantic layer +# Prompt: can you write an alert spec that triggers when a service has more than 0.01% error rate in datadog for more than an hour? +workflow: + id: service-error-rate-monitor + name: Service Error Rate Monitor + description: Monitors service error rates through Datadog metrics, triggering alerts when error rate exceeds 0.01% for over an hour with Slack notifications. 
+ owners: + - github-johndoe + - slack-janedoe + services: + - my-service + triggers: + - type: manual + steps: + - name: check-error-rate + provider: + type: datadog + config: "{{ providers.datadog }}" + with: + query: "sum:my_service.errors{*}.as_count() / sum:my_service.requests{*}.as_count() * 100" + timeframe: "1h" + actions: + - name: notify-slack + condition: + - name: threshold-condition + type: threshold + value: "{{ steps.check-error-rate.results }}" + compare_to: 0.01 + compare_type: gt + provider: + type: slack + config: "{{ providers.slack-demo }}" + with: + channel: service-alerts + message: > + The my_service error rate is higher than 0.01% for more than an hour. Please investigate. diff --git a/examples/workflows/severity_changed.yml b/examples/workflows/severity_changed.yml index db852f3f4a..363d4d29c4 100644 --- a/examples/workflows/severity_changed.yml +++ b/examples/workflows/severity_changed.yml @@ -1,6 +1,7 @@ workflow: - id: on-severity-change - description: demonstrates how to trigger a workflow when severity changes, and show available options + id: severity-change-monitor + name: Severity Change Monitor + description: Tracks alert severity changes and provides detailed notifications about severity level transitions. 
triggers: - type: alert severity_changed: true @@ -10,4 +11,4 @@ workflow: type: console with: # "The severity has changed from warning to info (it has decreased from last alert)" - alert_message: "The severity has changed from {{ alert.previous_severity }} to {{ alert.severity }} (it has {{ alert.severity_change }} since last alert)" + message: "The severity has changed from {{ alert.previous_severity }} to {{ alert.severity }} (it has {{ alert.severity_change }} since last alert)" diff --git a/examples/workflows/signl4-alerting-workflow.yaml b/examples/workflows/signl4-alerting-workflow.yaml index 7923b3129a..dc63faafd1 100644 --- a/examples/workflows/signl4-alerting-workflow.yaml +++ b/examples/workflows/signl4-alerting-workflow.yaml @@ -1,18 +1,20 @@ -id: signl4-alerting-workflow -description: handle alerts -triggers: -- filters: - - key: source - value: r".*" - type: alert -owners: [] -services: [] -steps: [] -actions: -- name: signl4-action - provider: - config: '{{ providers.SIGNL4 Alerting }}' - type: signl4 - with: - message: Test. - title: Keep Alert +workflow: + id: signl4-alert-notifier + name: SIGNL4 Alert Notifier + description: Routes alerts to SIGNL4 for mobile team alerting with customizable titles and messages. + triggers: + - filters: + - key: source + value: r".*" + type: alert + owners: [] + services: [] + steps: [] + actions: + - name: signl4-action + provider: + config: "{{ providers.signl4-alerting }}" + type: signl4 + with: + message: Test. + title: Keep Alert diff --git a/examples/workflows/simple_http_request_ntfy.yml b/examples/workflows/simple_http_request_ntfy.yml index 988ffbf911..d06f618858 100644 --- a/examples/workflows/simple_http_request_ntfy.yml +++ b/examples/workflows/simple_http_request_ntfy.yml @@ -1,7 +1,11 @@ # Alert if a result queried from the DB is above a certain thershold. 
-alert: - id: raw-sql-query - description: Monitor that time difference is no more than 1 hour +workflow: + id: mysql-ntfy-monitor + name: MySQL Ntfy Monitor + description: Monitors MySQL datetime values and sends notifications through Ntfy when thresholds are exceeded. + triggers: + - type: interval + value: 300 # every 5 minutes steps: - name: get-max-datetime provider: @@ -13,18 +17,20 @@ alert: actions: - name: trigger-ntfy condition: - - name: threshold-condition - type: threshold - # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours - # utcnow() returns the local machine datetime in UTC - # to_utc() converts a datetime to UTC - value: keep.datetime_compare(keep.utcnow(), keep.to_utc({{ steps.get-max-datetime.results[0][0] }})) - compare_to: 1 # hours - compare_type: gt # greater than + - name: threshold-condition + type: threshold + # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours + # utcnow() returns the local machine datetime in UTC + # to_utc() converts a datetime to UTC + value: keep.datetime_compare(keep.utcnow(), keep.to_utc({{ steps.get-max-datetime.results[0][0] }})) + compare_to: 1 # hours + compare_type: gt # greater than provider: type: http with: method: POST body: - message: "Time difference: {{ steps.get-max-datetime.conditions.threshold.0.value }}" + alert: "{{ alert }}" + fingerprint: "{{ alert.fingerprint }}" + some_customized_field: "{{ keep.strip(alert.some_attribute) }}" url: "https://ntfy.sh/MoRen5UlPEQr8s4Y" diff --git a/examples/workflows/slack-message-reaction.yml b/examples/workflows/slack-message-reaction.yml new file mode 100644 index 0000000000..7899ecd37b --- /dev/null +++ b/examples/workflows/slack-message-reaction.yml @@ -0,0 +1,88 @@ +workflow: + id: slack-alert-lifecycle + name: Slack Alert Lifecycle Manager + description: Manages alert lifecycle in Slack with automatic reactions for resolved alerts and enriched tenant information. 
+ disabled: false + triggers: + - type: manual + - filters: + - key: source + value: gcpmonitoring + type: alert + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: slack-alert-resolved + if: "'{{ alert.slack_timestamp }}' and '{{ alert.status }}' == 'resolved'" + provider: + config: "{{ providers.keephq }}" + type: slack + with: + channel: C06PF9TCWUF + message: "white_check_mark" + thread_timestamp: "{{ alert.slack_timestamp }}" + notification_type: "reaction" + - name: get-tenant-name + if: "not '{{ alert.customer_name }}'" + provider: + config: "{{ providers.readonly }}" + type: mysql + with: + as_dict: true + enrich_alert: + - key: customer_name + value: results.name + query: select * from tenant where id = '{{ alert.tenantId }}' + single_row: true + - name: send-slack-alert + if: "not '{{ alert.slack_timestamp }}'" + provider: + config: "{{ providers.keephq }}" + type: slack + with: + enrich_alert: + - key: slack_timestamp + value: results.slack_timestamp + blocks: + - text: + emoji: true + text: "{{alert.gcp.policy_name}}" + type: plain_text + type: header + - elements: + - elements: + - text: "Tenant ID: {{alert.tenantId}}{{^alert.tenantId}}n/a{{/alert.tenantId}}" + type: text + type: rich_text_section + type: rich_text + - elements: + - elements: + - text: "Tenant Name: {{alert.customer_name}}{{^alert.customer_name}}n/a{{/alert.customer_name}}" + type: text + type: rich_text_section + type: rich_text + - elements: + - elements: + - text: "Scopes: {{alert.validatedScopes}}{{^alert.validatedScopes}}n/a{{/alert.validatedScopes}}" + type: text + type: rich_text_section + type: rich_text + - elements: + - elements: + - text: "Description: {{alert.content}}" + type: text + type: rich_text_section + type: rich_text + - elements: + - action_id: actionId-0 + text: + emoji: true + text: ":gcp: Original Alert" + type: plain_text + type: button + url: "{{alert.url}}" + type: actions + channel: C06PF9TCWUF + message: "" diff --git 
a/examples/workflows/slack-workflow-trigger.yml b/examples/workflows/slack-workflow-trigger.yml new file mode 100644 index 0000000000..112bb0ff6c --- /dev/null +++ b/examples/workflows/slack-workflow-trigger.yml @@ -0,0 +1,39 @@ +workflow: + id: slack-workflow-trigger + name: Slack Interactive Workflow Trigger + description: Creates an interactive Slack message with a button that can trigger another workflow, demonstrating workflow chaining through Slack interactions. + disabled: false + triggers: + - type: manual + - type: alert + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: send-slack-alert + if: "not '{{ alert.slack_timestamp }}'" + provider: + config: "{{ providers.slack-prod }}" + type: slack + with: + blocks: + - text: + emoji: true + text: "{{alert.name}}" + type: plain_text + type: header + - elements: + - action_id: actionId-0 + text: + emoji: true + text: "Trigger Slack Workflow" + type: plain_text + type: button + # The following will trigger the workflow with the whole alert object: + # url: "https://api.keephq.dev/workflows/WORKFLOW_ID_TO_EXECUTE/run?alert={{alert.id}}&api_key=YOUR_API_KEY" + # The following will trigger the workflow with the alert name, as an example, while any parameters can be passed: + url: "https://api.keephq.dev/workflows/WORKFLOW_ID_TO_EXECUTE/run?name={{alert.name}}&api_key=YOUR_API_KEY" + type: actions + channel: C06PF9TCWUF + message: "" diff --git a/examples/workflows/slack_basic.yml b/examples/workflows/slack_basic.yml index 9c73f1da6b..2ef31716e8 100644 --- a/examples/workflows/slack_basic.yml +++ b/examples/workflows/slack_basic.yml @@ -1,6 +1,7 @@ workflow: - id: slack-basic-demo - description: Send a slack message when a cloudwatch alarm is triggered + id: cloudwatch-slack-notifier + name: CloudWatch Slack Notifier + description: Forwards AWS CloudWatch alarms to Slack channels with customized alert messages. 
triggers: - type: alert filters: diff --git a/examples/workflows/slack_basic_cel.yml b/examples/workflows/slack_basic_cel.yml new file mode 100644 index 0000000000..01fb6b825e --- /dev/null +++ b/examples/workflows/slack_basic_cel.yml @@ -0,0 +1,15 @@ +workflow: + id: cloudwatch-slack-notifier-cel + name: CloudWatch Slack Notifier (CEL) + description: Forwards AWS CloudWatch alarms to Slack channels with customized alert messages using CEL filters. + triggers: + - type: alert + cel: source.contains("cloudwatch") + - type: manual + actions: + - name: trigger-slack + provider: + type: slack + config: " {{ providers.slack-prod }} " + with: + message: "Got alarm from aws cloudwatch! {{ alert.name }}" diff --git a/examples/workflows/slack_basic_interval.yml b/examples/workflows/slack_basic_interval.yml index 8cc9e0fbac..a7d7fb81b6 100644 --- a/examples/workflows/slack_basic_interval.yml +++ b/examples/workflows/slack_basic_interval.yml @@ -1,6 +1,7 @@ workflow: - id: slack-basic-demo - description: Send a slack message every interval + id: scheduled-slack-notifier + name: Scheduled Slack Notifier + description: Sends periodic Slack messages at configurable intervals for regular status updates or reminders. 
triggers: - type: interval value: 15 diff --git a/examples/workflows/slack_message_update.yml b/examples/workflows/slack_message_update.yml new file mode 100644 index 0000000000..0e0ccd43d7 --- /dev/null +++ b/examples/workflows/slack_message_update.yml @@ -0,0 +1,67 @@ +workflow: + id: zabbix-notification-lifecycle + name: Slack Notification Lifecycle Manager + description: Manages messages and updates as attachments in Slack with automatic updates on resolved alerts + disabled: false + triggers: + - type: manual + - type: alert + cel: severity > 'info' && source.contains('zabbix') + inputs: [] + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: slack-alert-resolved + if: "'{{ alert.slack_timestamp }}' and '{{ alert.status }}' == 'resolved'" + provider: + type: slack + config: "{{ providers.keephq }}" + with: + slack_timestamp: "{{alert.slack_timestamp}}" + channel: C06PF9TCWUF + attachments: + - color: good + title: "Resolved: {{alert.name}}" + title_link: "{{alert.url}}" + fields: + - title: Host + value: "{{alert.hostname}}" + short: true + - title: Severity + value: "{{alert.severity}}" + short: true + - title: Description + value: "{{alert.description}}" + short: true + - title: Time + value: "{{alert.time}}" + short: true + - name: slack-alert + if: not '{{ alert.slack_timestamp }}' or '{{alert.status}}' == 'firing' + provider: + type: slack + config: "{{ providers.keephq }}" + with: + enrich_alert: + - key: slack_timestamp + value: results.slack_timestamp + channel: C06PF9TCWUF + attachments: + - color: danger + title: "{{alert.name}}" + title_link: "{{alert.url}}" + fields: + - title: Host + value: "{{alert.hostname}}" + short: true + - title: Severity + value: "{{alert.severity}}" + short: true + - title: Description + value: "{{alert.description}}" + short: true + - title: Time + value: "{{alert.time}}" + short: true diff --git a/examples/workflows/squadcast_example.yml b/examples/workflows/squadcast_example.yml index 
5849c5e3e2..8847c64570 100644 --- a/examples/workflows/squadcast_example.yml +++ b/examples/workflows/squadcast_example.yml @@ -1,15 +1,16 @@ workflow: - id: squadcast - description: squadcast + id: squadcast-incident-creator + name: SquadCast Incident Creator + description: Creates SquadCast incidents from alerts with customizable messages and additional context data. triggers: - type: alert actions: - name: create-incident provider: - config: "{{ providers.squadcast }}" - type: squadcast - with: - additional_json: '{{ alert }}' - description: TEST - message: '{{ alert.name }}-test' - notify_type: incident + config: "{{ providers.squadcast }}" + type: squadcast + with: + additional_json: "{{ alert }}" + description: TEST + message: "{{ alert.name }}-test" + notify_type: incident diff --git a/examples/workflows/teams-adaptive-card-notifier.yaml b/examples/workflows/teams-adaptive-card-notifier.yaml new file mode 100644 index 0000000000..de08b93d02 --- /dev/null +++ b/examples/workflows/teams-adaptive-card-notifier.yaml @@ -0,0 +1,24 @@ +workflow: + id: teams-adaptive-card-notifier + name: Teams Adaptive Card Notifier + description: Sends customized Microsoft Teams notifications using Adaptive Cards with dynamic alert information and formatted sections. 
+ disabled: false + triggers: + - type: manual + - filters: + - key: source + value: r".*" + type: alert + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: teams-action + provider: + config: "{{ providers.teams }}" + type: teams + with: + message: "" + sections: '[{"type": "TextBlock", "text": "{{alert.name}}"}, {"type": "TextBlock", "text": "Tal from Keep"}]' + typeCard: message diff --git a/examples/workflows/teams-adaptive-cards-with-mentions.yaml b/examples/workflows/teams-adaptive-cards-with-mentions.yaml new file mode 100644 index 0000000000..23cde4604e --- /dev/null +++ b/examples/workflows/teams-adaptive-cards-with-mentions.yaml @@ -0,0 +1,24 @@ +workflow: + id: teams-adaptive-card-with-mentions + name: Teams Adaptive Card With Mentions + description: Sends Microsoft Teams notifications using Adaptive Cards with user mentions to notify specific team members. + disabled: false + triggers: + - type: manual + - filters: + - key: source + value: r".*" + type: alert + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: teams-action + provider: + config: "{{ providers.teams }}" + type: teams + with: + typeCard: message + sections: '[{"type": "TextBlock", "text": "Alert: {{alert.name}}"}, {"type": "TextBlock", "text": "Hello John Doe, please review this alert!"}, {"type": "TextBlock", "text": "Severity: {{alert.severity}}"}]' + mentions: '[{"id": "john.doe@example.com", "name": "John Doe"}]' diff --git a/examples/workflows/telegram_advanced.yml b/examples/workflows/telegram_advanced.yml new file mode 100644 index 0000000000..72cda7d19c --- /dev/null +++ b/examples/workflows/telegram_advanced.yml @@ -0,0 +1,20 @@ +workflow: + id: telegram-message-topic-markup + name: Telegram Message Sender with Topic Markup + description: Send messages into Telegram topic with a message containing a reply markup. 
+ triggers: + - type: manual + actions: + - name: telegram + provider: + type: telegram + config: "{{ providers.telegram }}" + with: + message: "message with topic markup" + chat_id: "-1001234567890" + topic_id: "1234" + reply_markup: + 📌 Confluence 📖: + url: "confluence.example.com" + 📖 Documentation 📖: + url: "docs.example.com" diff --git a/examples/workflows/telegram_basic.yml b/examples/workflows/telegram_basic.yml index 2bdc45f63e..24d30ab932 100644 --- a/examples/workflows/telegram_basic.yml +++ b/examples/workflows/telegram_basic.yml @@ -1,6 +1,7 @@ workflow: - id: telegram-example - description: telegram-example + id: telegram-message-sender + name: Telegram Message Sender + description: Sends customized notifications to Telegram channels or users using environment-configured chat IDs. triggers: - type: manual actions: @@ -10,4 +11,5 @@ workflow: config: "{{ providers.telegram }}" with: message: "test" - chat_id: " {{ os.environ['TELEGRAM_CHAT_ID'] }}" + chat_id: "-1001234567890" + image_url: "https://cdn.prod.website-files.com/66adeb018210ff2165886994/67aa1f6766f15cb7ec62e962_Keep%20With%20Name.svg" diff --git a/examples/workflows/test_jira_create_with_custom_fields.yml b/examples/workflows/test_jira_create_with_custom_fields.yml new file mode 100644 index 0000000000..2c29bb174d --- /dev/null +++ b/examples/workflows/test_jira_create_with_custom_fields.yml @@ -0,0 +1,26 @@ +workflow: + id: test-jira-create-custom-fields + name: Test Jira Create with Custom Fields + description: Test workflow to demonstrate CREATE operations with custom fields + disabled: false + triggers: + - type: manual + inputs: [] + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: jira-action + provider: + type: jira + config: "{{ providers.jira }}" + with: + project_key: "TEST" + board_name: "TEST" + summary: "Create new issue with custom fields" + description: "This is a test issue created with custom fields" + issue_type: "Task" + custom_fields: + 
customfield_10696: "10" + customfield_10201: "Critical" diff --git a/examples/workflows/test_jira_custom_fields_fix.yml b/examples/workflows/test_jira_custom_fields_fix.yml new file mode 100644 index 0000000000..afe02600af --- /dev/null +++ b/examples/workflows/test_jira_custom_fields_fix.yml @@ -0,0 +1,27 @@ +workflow: + id: test-jira-custom-fields-fix + name: Test Jira Custom Fields Fix + description: Test workflow to demonstrate the fix for Jira custom fields update issue + disabled: false + triggers: + - type: manual + inputs: [] + consts: {} + owners: [] + services: [] + steps: [] + actions: + - name: jira-action + provider: + type: jira + config: "{{ providers.jira }}" + with: + issue_id: "{{ incident.ticket_id }}" + project_key: "TEST" + board_name: "TEST" + summary: "Update summary of an issue" + description: "Test description" + issue_type: "Task" + custom_fields: + customfield_10696: "10" + customfield_10201: "Critical" diff --git a/examples/workflows/trello_new_card_alert.yml b/examples/workflows/trello_new_card_alert.yml deleted file mode 100644 index 72f3265b09..0000000000 --- a/examples/workflows/trello_new_card_alert.yml +++ /dev/null @@ -1,38 +0,0 @@ -# A new trello card was created -alert: - id: notify-new-trello-card - description: Notify my slack when new trello card is created - steps: - - name: trello-cards - provider: - type: trello - config: "{{ providers.trello-provider }}" - with: - project-name: demo-project - board_id: hIjQQX9S - filter: "createCard" - condition: - - name: assert-condition - type: assert - assert: "{{ state.notify-new-trello-card.-1.alert_context.alert_steps_context.trello-cards.results.number_of_cards }} >= {{steps.trello-cards.results.number_of_cards }}" # if there are more than 0 new stargazers, trigger the action - actions: - - name: trigger-slack - provider: - type: slack - config: " {{ providers.slack-demo }} " - with: - channel: some-channel-that-youll-decide-later - # Message is always mandatory - message: > - A 
new card was created - -providers: - trello-provider: - description: Trello Production - authentication: - api_key: "{{ env.TRELLO_API_KEY }}" - api_token: "{{ env.TRELLO_API_TOKEN }}" - slack-demo: - description: Slack Demo - authentication: - webhook_url: "{{ env.SLACK_WEBHOOK_URL }}" diff --git a/examples/workflows/update-incident-grafana-incident.yaml b/examples/workflows/update-incident-grafana-incident.yaml new file mode 100644 index 0000000000..d83c79267f --- /dev/null +++ b/examples/workflows/update-incident-grafana-incident.yaml @@ -0,0 +1,44 @@ +workflow: + id: grafana-incident-enricher + name: Grafana Incident AI Enricher + description: Enriches Grafana incidents with AI-generated titles using OpenAI analysis of incident context. + triggers: + - type: incident + events: + - created + consts: {} + owners: [] + services: [] + steps: + - name: get-enrichments + provider: + type: openai + config: "{{ providers.openai }}" + with: + prompt: You received such an incident {{incident}}, generate title + model: gpt-4o-mini + structured_output_format: + type: json_schema + json_schema: + name: missing_fields + schema: + type: object + properties: + title: + type: string + description: "Anaylse the {{incident}} carefully and give a suitable title" + required: + - "title" + additionalProperties: false + strict: true + actions: + - name: grafana_incident-action + provider: + type: grafana_incident + config: "{{ providers.grafana }}" + with: + # Checkout https://docs.keephq.dev/providers/documentation/grafana_incident-provider for other available fields + updateType: updateIncidentTitle + operationType: update + incident_id: "{{ incident.fingerprint }}" + title: "{{ steps.get-enrichments.results.response.title }}" diff --git a/examples/workflows/update-task-in-asana.yaml b/examples/workflows/update-task-in-asana.yaml new file mode 100644 index 0000000000..1ff03ec9c9 --- /dev/null +++ b/examples/workflows/update-task-in-asana.yaml @@ -0,0 +1,20 @@ +workflow: + id: 
update-task-in-asana + name: Update task in asana + description: asana + disabled: false + triggers: + - type: manual + consts: {} + owners: [] + services: [] + steps: + - name: asana-step + provider: + type: asana + config: "{{ providers.asana }}" + with: + task_id: 1209749862246975 + completed: true + name: "done: updated the task" + actions: [] diff --git a/examples/workflows/update_jira_ticket.yml b/examples/workflows/update_jira_ticket.yml new file mode 100644 index 0000000000..a0c6981adf --- /dev/null +++ b/examples/workflows/update_jira_ticket.yml @@ -0,0 +1,17 @@ +workflow: + id: jira-ticket-updater + name: Jira Ticket Updater + description: Updates existing Jira issues with new summaries and descriptions while maintaining issue relationships. + triggers: + - type: manual + actions: + - name: jira-action + provider: + config: "{{ providers.Jira }}" + type: jira + with: + board_name: "" + description: Update description of an issue + issue_id: 10023 + project_key: "" + summary: Update summary of an issue diff --git a/examples/workflows/update_service_now_tickets_status.yml b/examples/workflows/update_service_now_tickets_status.yml index f4732cb3eb..9ed7771131 100644 --- a/examples/workflows/update_service_now_tickets_status.yml +++ b/examples/workflows/update_service_now_tickets_status.yml @@ -1,6 +1,7 @@ workflow: - id: servicenow - description: update the ticket status every minute + id: servicenow-ticket-sync + name: ServiceNow Ticket Sync + description: Synchronizes ServiceNow ticket statuses with Keep alerts and maintains bidirectional state tracking. 
triggers: - type: manual steps: @@ -11,8 +12,8 @@ workflow: # get all the alerts with sys_id (means that ticket exists for them) with: filters: - - key: ticket_type - value: servicenow + - key: ticket_type + value: servicenow actions: # update the tickets - name: update-ticket @@ -25,5 +26,5 @@ workflow: table_name: "{{ foreach.value.alert_enrichment.enrichments.table_name }}" fingerprint: "{{ foreach.value.alert_fingerprint }}" enrich_alert: - - key: ticket_status - value: results.state + - key: ticket_status + value: results.state diff --git a/examples/workflows/update_workflows_from_http.yml b/examples/workflows/update_workflows_from_http.yml new file mode 100644 index 0000000000..e5b6ab353c --- /dev/null +++ b/examples/workflows/update_workflows_from_http.yml @@ -0,0 +1,20 @@ +workflow: + id: http-workflow-sync + name: HTTP Workflow Sync + description: Updates Keep workflows from remote HTTP sources, supporting GitHub raw content and other HTTP endpoints. + triggers: + - type: manual + steps: + - name: get-workflow + provider: + type: http + with: + method: GET + url: "https://raw.githubusercontent.com/keephq/keep/refs/heads/main/examples/workflows/new_github_stars.yml" + + actions: + - name: update + provider: + type: keep + with: + workflow_to_update_yaml: "raw_render_without_execution({{ steps.get-workflow.results.body }})" diff --git a/examples/workflows/update_workflows_from_s3.yml b/examples/workflows/update_workflows_from_s3.yml new file mode 100644 index 0000000000..6ff68c3e09 --- /dev/null +++ b/examples/workflows/update_workflows_from_s3.yml @@ -0,0 +1,26 @@ +workflow: + id: s3-workflow-sync + name: S3 Workflow Sync + description: Synchronizes Keep workflows from S3 bucket storage with optional full sync capabilities. 
+ triggers: + - type: manual + steps: + - name: s3-dump + provider: + config: "{{ providers.s3 }}" + type: s3 + with: + bucket: "keep-workflows" + actions: + # optional: delete all other workflows before updating for full sync + # - name: delete-all-other-workflows + # provider: + # type: keep + # with: + # delete_all_other_workflows: true + - name: update + foreach: "{{ steps.s3-dump.results }}" + provider: + type: keep + with: + workflow_to_update_yaml: "raw_render_without_execution({{ foreach.value }})" diff --git a/examples/workflows/webhook_example.yml b/examples/workflows/webhook_example.yml new file mode 100644 index 0000000000..d3f13a9c2b --- /dev/null +++ b/examples/workflows/webhook_example.yml @@ -0,0 +1,23 @@ +workflow: + id: webhook-test-runner + name: Webhook Test Runner + description: Tests webhook functionality with console logging and customizable message payloads. + debug: true + triggers: + - type: manual + + steps: + - name: console-test + provider: + type: console + with: + message: "Hello world!" + + actions: + - name: webhook-test + provider: + type: webhook + config: "{{ providers.test }}" + with: + body: + message: "Hello world" diff --git a/examples/workflows/webhook_example_foreach.yml b/examples/workflows/webhook_example_foreach.yml new file mode 100644 index 0000000000..a5cafb9a71 --- /dev/null +++ b/examples/workflows/webhook_example_foreach.yml @@ -0,0 +1,42 @@ +workflow: + id: webhook-batch-processor + name: Webhook Batch Processor + description: Processes multiple alerts through webhooks with conditional execution based on alert status. 
+ debug: true + triggers: + - type: manual + + steps: + - name: webhook-get + provider: + type: webhook + config: "{{ providers.test }}" + with: + method: GET + url: "http://localhost:8000" + - name: get-alerts + foreach: " {{ steps.webhook-get.results.body.ids }}" + provider: + type: keep + with: + version: 2 + filter: 'id=="{{ foreach.value }}"' + actions: + - name: echo + foreach: " {{ steps.get-alerts.results }}" + if: '{{ foreach.value.0.status }} == "firing"' + provider: + type: console + with: + logger: true + message: "alert {{ foreach.value.0.id }} is {{ foreach.value.0.status }}" + # actions: + # - name: webhook-test + # foreach: " {{ steps.get-alerts.results }}" + # if: '{{ foreach.value.0.status }} == "firing"' + # provider: + # type: webhook + # config: "{{ providers.test }}" + # with: + # body: + # message: "Hello world" diff --git a/examples/workflows/workflow_only_first_time_example.yml b/examples/workflows/workflow_only_first_time_example.yml index 53e02efb80..0c3b9de1de 100644 --- a/examples/workflows/workflow_only_first_time_example.yml +++ b/examples/workflows/workflow_only_first_time_example.yml @@ -1,6 +1,7 @@ workflow: - id: alert-first-time - description: send slack message only the first time an alert fires + id: first-alert-notifier + name: First Alert Notifier + description: Sends Slack notifications only for the first occurrence of an alert within a 24-hour window. triggers: - type: alert filters: diff --git a/examples/workflows/workflow_start_example.yml b/examples/workflows/workflow_start_example.yml index 9ccdfadaff..cbfa8b6666 100644 --- a/examples/workflows/workflow_start_example.yml +++ b/examples/workflows/workflow_start_example.yml @@ -1,6 +1,7 @@ workflow: - id: alert-time-check - description: Handle alerts based on startedAt timestamp + id: tiered-alert-escalator + name: Tiered Alert Escalator + description: Manages alert escalation through different tiers based on alert duration with targeted Slack notifications. 
triggers: - type: alert filters: diff --git a/examples/workflows/zoom_chat_example.yml b/examples/workflows/zoom_chat_example.yml new file mode 100644 index 0000000000..55bd11213e --- /dev/null +++ b/examples/workflows/zoom_chat_example.yml @@ -0,0 +1,17 @@ +workflow: + id: zoom_chat-message + name: Zoom Chat Message + description: Sends a notification to a Zoom Chat channel via the Incoming Webhook application. + triggers: + - type: manual + actions: + - name: zoom_chat-action + provider: + type: zoom_chat + config: "{{ providers.zoom_chat }}" + with: + message: test message from keep + severity: critical + title: critical test message + tagged_users: joesmith@mail.com + details_url: https://www.github.com/keep \ No newline at end of file diff --git a/examples/workflows/zoom_example.yml b/examples/workflows/zoom_example.yml new file mode 100644 index 0000000000..ba9bd583f6 --- /dev/null +++ b/examples/workflows/zoom_example.yml @@ -0,0 +1,35 @@ +workflow: + id: zoom-warroom-creator + name: Zoom War Room Creator + description: Creates Zoom war room meetings for alerts with automatic recording and Slack notification containing join links. 
+ triggers: + - type: manual + actions: + - name: create-zoom-meeting + provider: + type: zoom + config: "{{ providers.zoom }}" + with: + topic: "War room - {{ alert.name }}" + record_meeting: true + - name: send-slack-alert + provider: + config: "{{ providers.slack }}" + type: slack + with: + blocks: + - text: + emoji: true + text: "{{alert.name}}" + type: plain_text + type: header + - elements: + - action_id: actionId-0 + text: + emoji: true + text: "Join Warroom [Zoom]" + type: plain_text + type: button + url: "{{ steps.create-zoom-meeting.results.join_url }}" + type: actions + message: "" diff --git a/grafana/dashboards/keep.json b/grafana/dashboards/keep.json new file mode 100644 index 0000000000..a94725b1f5 --- /dev/null +++ b/grafana/dashboards/keep.json @@ -0,0 +1,737 @@ +{ + "annotations": { + "list": [] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "legend": { + "calcs": ["mean", "max"], + "displayMode": "table", + "placement": "right", + 
"showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "title": "Request Duration by Endpoint", + "type": "timeseries", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "rate(keep_http_request_duration_seconds_sum{handler!=\"none\"}[5m]) / rate(keep_http_request_duration_seconds_count{handler!=\"none\"}[5m])", + "legendFormat": "{{handler}}" + } + ] + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 2, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "10.0.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum(keep_running_tasks_current)", + "refId": "A" + } + ], + "title": "Running Tasks", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + 
"mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum(rate(keep_http_requests_total{status=~\"2..\"}[5m])) by (handler)", + "legendFormat": "{{handler}}", + "refId": "A" + } + ], + "title": "Request Rate by Endpoint (2xx)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "rate(keep_events_in_total[5m])", + "legendFormat": "Events In", + "refId": "A" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "rate(keep_events_processed_total[5m])", + "legendFormat": "Events Processed", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "rate(keep_events_error_total[5m])", + "legendFormat": "Events Error", + "refId": "C" + } + ], + "title": "Events Processing Rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 5, + "title": "Workflow Execution Duration", + "type": "timeseries", + "targets": [ + { + "expr": "rate(keep_workflows_execution_duration_seconds_sum[5m]) / rate(keep_workflows_execution_duration_seconds_count[5m])", + "legendFormat": "{{workflow_id}}" + } + ] + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 5 + }, + { + "color": "red", + "value": 10 + } + ] + } + } + }, + 
"gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 6, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "title": "Workflow Queue Size", + "type": "gauge", + "targets": [ + { + "expr": "keep_workflows_queue_size", + "legendFormat": "{{tenant_id}}" + } + ] + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + } + } + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 7, + "title": "Workflow Executions", + "type": "timeseries", + "targets": [ + { + "expr": "rate(keep_workflows_executions_total[5m])", + "legendFormat": "{{workflow_id}} ({{trigger_type}})" + } + ] + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + 
"spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 8, + "options": { + "legend": { + "calcs": ["lastNotNull"], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "keep_events_in_total", + "legendFormat": "Total Events In", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "keep_events_processed_total", + "legendFormat": "Total Events Processed", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "keep_events_error_total", + "legendFormat": "Total Events Error", + "refId": "C" + } + ], + "title": "Total Events", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 20, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + 
"h": 8, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 9, + "options": { + "legend": { + "calcs": ["lastNotNull"], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "keep_workflows_executions_total", + "legendFormat": "{{workflow_id}} ({{trigger_type}})", + "refId": "A" + } + ], + "title": "Total Workflow Executions", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 38, + "style": "dark", + "tags": ["keep"], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Keep Dashboard", + "uid": "keep", + "version": 1, + "weekStart": "" +} diff --git a/grafana/provisioning/dashboards/keep.yml b/grafana/provisioning/dashboards/keep.yml new file mode 100644 index 0000000000..6213d6185d --- /dev/null +++ b/grafana/provisioning/dashboards/keep.yml @@ -0,0 +1,11 @@ +apiVersion: 1 + +providers: + - name: "Keep" + orgId: 1 + folder: "" + type: file + disableDeletion: false + editable: true + options: + path: /etc/grafana/dashboards diff --git a/grafana/provisioning/datasources/prometheus.yml b/grafana/provisioning/datasources/prometheus.yml new file mode 100644 index 0000000000..a221c3c379 --- /dev/null +++ b/grafana/provisioning/datasources/prometheus.yml @@ -0,0 +1,9 @@ +apiVersion: 1 + +datasources: + - name: prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + isDefault: true + editable: true diff --git a/keep-ui/.dockerignore b/keep-ui/.dockerignore new file mode 100644 index 0000000000..d38b75ee93 --- /dev/null +++ b/keep-ui/.dockerignore @@ -0,0 +1,8 @@ +node_modules +.next +.vercel +.env.* +.venv/ +.vscode/ +.github/ +.pytest_cache diff --git a/keep-ui/.env.local.example b/keep-ui/.env.local.example index 87ec162aa0..ff479786fa 100644 --- 
a/keep-ui/.env.local.example +++ b/keep-ui/.env.local.example @@ -1,5 +1,8 @@ NEXTAUTH_URL=http://localhost:3000 -NEXTAUTH_SECRET= # Linux: `openssl rand -hex 32` or go to https://generate-secret.now.sh/32 + +# Required: +# NEXTAUTH_SECRET= # Linux: `openssl rand -hex 32` or go to https://generate-secret.now.sh/32 + # API API_URL=http://localhost:8080 # Auth @@ -11,4 +14,4 @@ PUSHER_HOST=localhost PUSHER_PORT=6001 PUSHER_APP_KEY=keepappkey # Logging -LOG_FORMAT=dev_terminal \ No newline at end of file +LOG_FORMAT=dev_terminal diff --git a/keep-ui/.eslintignore b/keep-ui/.eslintignore new file mode 100644 index 0000000000..3c3629e647 --- /dev/null +++ b/keep-ui/.eslintignore @@ -0,0 +1 @@ +node_modules diff --git a/keep-ui/.eslintrc.json b/keep-ui/.eslintrc.json index bffb357a71..4d765f2817 100644 --- a/keep-ui/.eslintrc.json +++ b/keep-ui/.eslintrc.json @@ -1,3 +1,3 @@ { - "extends": "next/core-web-vitals" + "extends": ["next/core-web-vitals", "prettier"] } diff --git a/keep-ui/.gitignore b/keep-ui/.gitignore index f070db838c..41c3e3360a 100644 --- a/keep-ui/.gitignore +++ b/keep-ui/.gitignore @@ -8,6 +8,8 @@ pids *.pid *.seed +!lib + # Directory for instrumented libs generated by jscoverage/JSCover lib-cov @@ -38,3 +40,15 @@ jspm_packages .next .env.local + +app/topology/mock-topology-data.tsx +.vercel + +# Sentry Config File +.env.sentry-build-plugin + +# Monaco workers (generated at build time for turbopack dev) +public/monaco-workers/ + +# TypeScript build info +tsconfig.tsbuildinfo diff --git a/keep-ui/.prettierrc b/keep-ui/.prettierrc new file mode 100644 index 0000000000..f0eb61e0f7 --- /dev/null +++ b/keep-ui/.prettierrc @@ -0,0 +1,6 @@ +{ + "trailingComma": "es5", + "tabWidth": 2, + "semi": true, + "singleQuote": false +} diff --git a/keep-ui/__mocks__/@monaco-editor/react.js b/keep-ui/__mocks__/@monaco-editor/react.js new file mode 100644 index 0000000000..a1b32029d4 --- /dev/null +++ b/keep-ui/__mocks__/@monaco-editor/react.js @@ -0,0 +1,30 @@ +const 
React = require('react'); + +module.exports = { + Editor: () => React.createElement('div', { 'data-testid': 'monaco-editor' }), + DiffEditor: () => React.createElement('div', { 'data-testid': 'monaco-diff-editor' }), + loader: { + config: jest.fn(), + init: jest.fn(() => Promise.resolve({ + editor: { + create: jest.fn(), + defineTheme: jest.fn(), + setTheme: jest.fn(), + getModel: jest.fn(), + setModelMarkers: jest.fn(), + }, + languages: { + register: jest.fn(), + setMonarchTokensProvider: jest.fn(), + setLanguageConfiguration: jest.fn(), + registerCompletionItemProvider: jest.fn(), + }, + MarkerSeverity: { + Error: 8, + Warning: 4, + Info: 2, + Hint: 1, + }, + })), + }, +}; \ No newline at end of file diff --git a/keep-ui/__mocks__/monaco-editor.js b/keep-ui/__mocks__/monaco-editor.js new file mode 100644 index 0000000000..e4423a7b14 --- /dev/null +++ b/keep-ui/__mocks__/monaco-editor.js @@ -0,0 +1,21 @@ +module.exports = { + editor: { + create: jest.fn(), + defineTheme: jest.fn(), + setTheme: jest.fn(), + getModel: jest.fn(), + setModelMarkers: jest.fn(), + }, + languages: { + register: jest.fn(), + setMonarchTokensProvider: jest.fn(), + setLanguageConfiguration: jest.fn(), + registerCompletionItemProvider: jest.fn(), + }, + MarkerSeverity: { + Error: 8, + Warning: 4, + Info: 2, + Hint: 1, + }, +}; \ No newline at end of file diff --git a/keep-ui/app/(health)/health/check.tsx b/keep-ui/app/(health)/health/check.tsx new file mode 100644 index 0000000000..dc41d688e8 --- /dev/null +++ b/keep-ui/app/(health)/health/check.tsx @@ -0,0 +1,65 @@ +"use client"; + +import ProvidersTiles from "@/app/(keep)/providers/providers-tiles"; +import React, { useEffect, useState } from "react"; +import { defaultProvider, Provider } from "@/shared/api/providers"; +import { useProvidersWithHealthCheck } from "@/utils/hooks/useProviders"; +import Loading from "@/app/(keep)/loading"; +import HealthPageBanner from "@/components/banners/health-page-banner"; + +const useFetchProviders = 
() => { + const [providers, setProviders] = useState([]); + const { data, error, mutate } = useProvidersWithHealthCheck(); + + if (error) { + throw error; + } + + const isLocalhost: boolean = true; + + useEffect(() => { + if (data) { + const fetchedProviders = data.providers + .filter((provider: Provider) => { + return provider.health; + }) + .map((provider) => ({ + ...defaultProvider, + ...provider, + id: provider.type, + installed: provider.installed ?? false, + health: provider.health, + })); + + setProviders(fetchedProviders); + } + }, [data]); + + return { + providers, + error, + isLocalhost, + mutate, + }; +}; + +export default function ProviderHealthPage() { + const { providers, isLocalhost, mutate } = useFetchProviders(); + + if (!providers || providers.length <= 0) { + return ; + } + + return ( + <> + + + + ); +} diff --git a/keep-ui/app/(health)/health/modal.tsx b/keep-ui/app/(health)/health/modal.tsx new file mode 100644 index 0000000000..be3f6a2851 --- /dev/null +++ b/keep-ui/app/(health)/health/modal.tsx @@ -0,0 +1,161 @@ +import React from "react"; +import Modal from "@/components/ui/Modal"; +import { + Badge, + BarChart, + Button, + Card, + DonutChart, + Subtitle, + Title, +} from "@tremor/react"; +import { CheckCircle2Icon } from "lucide-react"; + +interface ProviderHealthResultsModalProps { + handleClose: () => void; + isOpen: boolean; + healthResults: any; +} + +const ProviderHealthResultsModal = ({ + handleClose, + isOpen, + healthResults, +}: ProviderHealthResultsModalProps) => { + const handleModalClose = () => { + handleClose(); + }; + + return ( + +
+
+ + Spammy Alerts + {healthResults?.spammy?.length ? ( + <> + + Sorry to say, but looks like your alerts are spammy + + ) : ( + <> +
+ +
+ Everything is ok + + )} +
+ + Rules Quality + {healthResults?.rules?.unused ? ( + <> + + + {healthResults?.rules.unused} of your{" "} + {healthResults.rules.used + healthResults.rules.unused} alert + rules are not in use + + + ) : ( + <> +
+ +
+ Everything is ok + + )} +
+ + Actionable +
+ +
+ Everything is ok +
+ + + Topology coverage + {healthResults?.topology?.uncovered.length ? ( + <> + + + Not of your services are covered. Alerts are missing for: + {healthResults?.topology?.uncovered.map((service: any) => { + return ( + + {service.display_name + ? service.display_name + : service.service} + + ); + })} + + + ) : ( + <> +
+ +
+ Everything is ok + + )} +
+
+ + + Want to improve your observability? + + +
+
+ ); +}; + +export default ProviderHealthResultsModal; diff --git a/keep-ui/app/(health)/health/opengraph-image.png b/keep-ui/app/(health)/health/opengraph-image.png new file mode 100644 index 0000000000..6ca6f2de82 Binary files /dev/null and b/keep-ui/app/(health)/health/opengraph-image.png differ diff --git a/keep-ui/app/(health)/health/page.tsx b/keep-ui/app/(health)/health/page.tsx new file mode 100644 index 0000000000..7ba3f47ca8 --- /dev/null +++ b/keep-ui/app/(health)/health/page.tsx @@ -0,0 +1,11 @@ +import { Metadata } from "next"; +import ProviderHealthPage from "./check"; + +export const metadata: Metadata = { + title: "Keep – Check your alerts quality", + description: + "Easily check the configuration quality of your observability tools such as Datadog, Grafana, Prometheus, and more without the need to sign up.", +}; + +export default ProviderHealthPage; + diff --git a/keep-ui/app/(health)/layout.tsx b/keep-ui/app/(health)/layout.tsx new file mode 100644 index 0000000000..370f8b70ea --- /dev/null +++ b/keep-ui/app/(health)/layout.tsx @@ -0,0 +1,63 @@ +import React, { ReactNode } from "react"; +import { NextAuthProvider } from "../auth-provider"; +import { Mulish } from "next/font/google"; +import { ToastContainer } from "react-toastify"; +import { getConfig } from "@/shared/lib/server/getConfig"; +import { ConfigProvider } from "../config-provider"; +import { PHProvider } from "../posthog-provider"; +import ReadOnlyBanner from "@/components/banners/read-only-banner"; +import { auth } from "@/auth"; +import { ThemeScript, WatchUpdateTheme } from "@/shared/ui"; +import "@/app/globals.css"; +import "react-toastify/dist/ReactToastify.css"; +import { PostHogPageView } from "@/shared/ui/PostHogPageView"; + +// If loading a variable font, you don't need to specify the font weight +const mulish = Mulish({ + subsets: ["latin"], + display: "swap", +}); + +type RootLayoutProps = { + children: ReactNode; +}; + +export default async function RootLayout({ children }: 
RootLayoutProps) { + const config = getConfig(); + const session = await auth(); + + return ( + + + {/* ThemeScript must be the first thing to avoid flickering */} + + + + + {/* @ts-ignore-error Server Component */} + + {/* https://discord.com/channels/752553802359505017/1068089513253019688/1117731746922893333 */} +
+ {/* Add the banner here, before the navbar */} + {config.READ_ONLY && } +
{children}
+ {/** footer */} + {process.env.GIT_COMMIT_HASH && + process.env.SHOW_BUILD_INFO !== "false" && ( +
+
+ Version: {process.env.KEEP_VERSION} | Build:{" "} + {process.env.GIT_COMMIT_HASH.slice(0, 6)} +
+
+ )} + +
+
+
+
+ + + + ); +} diff --git a/keep-ui/app/(keep)/[...not-found]/page.tsx b/keep-ui/app/(keep)/[...not-found]/page.tsx new file mode 100644 index 0000000000..13a9bef779 --- /dev/null +++ b/keep-ui/app/(keep)/[...not-found]/page.tsx @@ -0,0 +1,8 @@ +"use client"; + +import { notFound } from "next/navigation"; + +// https://github.com/vercel/next.js/discussions/50034 +export default function NotFoundDummy() { + notFound(); +} diff --git a/keep-ui/app/(keep)/ai/ai-plugins.tsx b/keep-ui/app/(keep)/ai/ai-plugins.tsx new file mode 100644 index 0000000000..acb5b3225d --- /dev/null +++ b/keep-ui/app/(keep)/ai/ai-plugins.tsx @@ -0,0 +1,269 @@ +"use client"; + +import { Card, Title } from "@tremor/react"; +import { useAIStats, useAIActions } from "utils/hooks/useAI"; +import { useEffect, useMemo, useState } from "react"; +import Image from "next/image"; +import debounce from "lodash.debounce"; +import { + KeepLoader, + PageSubtitle, + showErrorToast, + showSuccessToast, +} from "@/shared/ui"; +import { PageTitle } from "@/shared/ui"; +import { AIConfig } from "./model"; + +function RangeInputWithLabel({ + setting, + onChange, +}: { + setting: any; + onChange: (newValue: number) => void; +}) { + const [value, setValue] = useState(setting.value); + + // Create a memoized debounced function + const debouncedOnChange = useMemo( + () => debounce((value: number) => onChange(value), 1000), + [onChange] + ); + + // Cleanup the debounced function on unmount + useEffect(() => { + return () => { + debouncedOnChange.cancel(); + }; + }, [debouncedOnChange]); + + return ( +
+

value: {value}

+ { + const newValue = + setting.type === "float" + ? parseFloat(e.target.value) + : parseInt(e.target.value, 10); + setValue(newValue); + debouncedOnChange(newValue); + }} + /> +
+ ); +} + +export function AIPlugins() { + const { + data: aistats, + isLoading, + mutate: refetchAIStats, + } = useAIStats({ + refreshInterval: 5000, + }); + const { updateAISettings } = useAIActions(); + + const handleUpdateAISettings = async ( + algorithm_id: string, + algorithm_config: AIConfig + ) => { + try { + await updateAISettings(algorithm_id, algorithm_config); + showSuccessToast("Settings updated successfully!"); + refetchAIStats(); + } catch (error) { + showErrorToast(error); + } + }; + + return ( +
+
+
+ AI Plugins + + For correlation, summarization, and enrichment + +
+
+ +
+
+
+ {isLoading ? ( + + ) : null} + {aistats?.algorithm_configs?.length === 0 && ( +
+ AI +
+ No AI enabled for this tenant +

+ AI plugins can correlate, enrich, or summarize your alerts + and incidents by leveraging the the context within Keep + allowing you to gain deeper insights and respond more + effectively. +

+

+ By the way, AI plugins are designed to work even in + air-gapped environments. You can train models using your + data, so there is no need to share information with + third-party providers like OpenAI. Keep your data secure + and private. +

+

+ + Talk to us to get access! + +

+
+
+ )} + {aistats?.algorithm_configs?.map((algorithm_config, index) => ( + +

+ {algorithm_config.algorithm.name} +

+

+ {algorithm_config.algorithm.description} +

+
+
+ {algorithm_config.settings.map((setting) => ( +
+ {setting.type === "bool" ? ( + { + const newValue = e.target.checked; + setting.value = newValue; + handleUpdateAISettings( + algorithm_config.algorithm_id, + algorithm_config + ); + }} + className="mt-2 bg-orange-500 accent-orange-200" + /> + ) : null} +
+

+ {setting.name} +

+

+ {setting.description} +

+
+ {setting.type === "float" ? ( +
+ { + setting.value = newValue; + handleUpdateAISettings( + algorithm_config.algorithm_id, + algorithm_config + ); + }} + /> +
+ ) : null} + {setting.type === "int" ? ( +
+ { + setting.value = newValue; + handleUpdateAISettings( + algorithm_config.algorithm_id, + algorithm_config + ); + }} + /> +
+ ) : null} +
+ ))} +
+ + {algorithm_config.settings_proposed_by_algorithm && + JSON.stringify(algorithm_config.settings) !== + JSON.stringify( + algorithm_config.settings_proposed_by_algorithm + ) && ( + + The new settings proposal +

+ The last time the model was trained and used for + inference, it suggested a configuration update. + However, please note that a configuration update + might not be very effective if the data quantity or + quality is low. For more details, please refer to + the logs below. +

+ {algorithm_config.settings_proposed_by_algorithm.map( + (proposed_setting: any, idx: number) => ( +
+

+ {proposed_setting.name}:{" "} + {String(proposed_setting.value)} +

+
+ ) + )} + +
+ )} +
+

Execution logs:

+
+                    {algorithm_config.feedback_logs
+                      ? algorithm_config.feedback_logs
+                      : "Algorithm not executed yet."}
+                  
+
+ ))} +
+
+
+
+
+ ); +} diff --git a/keep-ui/app/(keep)/ai/model.ts b/keep-ui/app/(keep)/ai/model.ts new file mode 100644 index 0000000000..e9c47181e2 --- /dev/null +++ b/keep-ui/app/(keep)/ai/model.ts @@ -0,0 +1,45 @@ +interface FloatOrIntSetting { + max?: number; + min?: number; + type: "float" | "int"; + value: number; +} + +interface BoolSetting { + type: "bool"; + value: boolean; +} + +interface BaseSetting { + name: string; + description: string; +} + +export type AlgorithmSetting = BaseSetting & (FloatOrIntSetting | BoolSetting); + +export interface Algorithm { + name: string; + description: string; + last_time_reminded?: string; +} + +export interface AIConfig { + id: string; + algorithm_id: string; + tenant_id: string; + settings: AlgorithmSetting[]; + settings_proposed_by_algorithm: AlgorithmSetting[]; + feedback_logs: string; + algorithm: Algorithm; +} + +export interface AIStats { + alerts_count: number; + incidents_count: number; + first_alert_datetime?: Date; + algorithm_configs: AIConfig[]; +} + +export interface AILogs { + log: string; +} diff --git a/keep-ui/app/(keep)/ai/page.tsx b/keep-ui/app/(keep)/ai/page.tsx new file mode 100644 index 0000000000..4cf019ee3e --- /dev/null +++ b/keep-ui/app/(keep)/ai/page.tsx @@ -0,0 +1,11 @@ +import { AIPlugins } from "./ai-plugins"; + +export default function Page() { + return ; +} + +export const metadata = { + title: "Keep - AI Correlation", + description: + "Correlate Alerts and Incidents with AI to identify patterns and trends.", +}; diff --git a/keep-ui/app/(keep)/alerts/[id]/page.tsx b/keep-ui/app/(keep)/alerts/[id]/page.tsx new file mode 100644 index 0000000000..6f943fdfca --- /dev/null +++ b/keep-ui/app/(keep)/alerts/[id]/page.tsx @@ -0,0 +1,20 @@ +import { createServerApiClient } from "@/shared/api/server"; +import AlertsPage from "./ui/alerts"; +import { getInitialFacets } from "@/features/filter/api"; + +type PageProps = { + params: Promise<{ id: string }>; + searchParams: Promise<{ [key: string]: string | string[] 
| undefined }>; +}; + +export default async function Page(props: PageProps) { + const params = await props.params; + const api = await createServerApiClient(); + const initialFacets = await getInitialFacets(api, "alerts"); + return ; +} + +export const metadata = { + title: "Keep - Alerts", + description: "Single pane of glass for all your alerts.", +}; diff --git a/keep-ui/app/(keep)/alerts/[id]/ui/__tests__/alerts-fingerprint.test.tsx b/keep-ui/app/(keep)/alerts/[id]/ui/__tests__/alerts-fingerprint.test.tsx new file mode 100644 index 0000000000..be6b579423 --- /dev/null +++ b/keep-ui/app/(keep)/alerts/[id]/ui/__tests__/alerts-fingerprint.test.tsx @@ -0,0 +1,364 @@ +/** + * Tests for the alerts.tsx fingerprint-modal fix. + * + * Bug: when alerts re-fetched (polling / WebSocket), the useEffect that opens + * the ViewAlertModal or EnrichAlertSidePanel was re-evaluated. If the alert + * list was momentarily empty (during the refetch) the component would fire a + * false "Alert fingerprint not found" toast and close the modal. + * + * Fix: `resolvedFingerprintRef` stores the fingerprint once it has been + * matched so that subsequent re-evaluations of the same fingerprint (with an + * empty or partial alerts list) do not trigger the error path. 
+ */ +import React from "react"; +import { render, act, waitFor } from "@testing-library/react"; +import { useRouter, useSearchParams } from "next/navigation"; +import { useProviders } from "@/utils/hooks/useProviders"; +import { usePresets } from "@/entities/presets/model"; +import { useAlertsTableData } from "@/widgets/alerts-table/ui/useAlertsTableData"; +import { showErrorToast } from "@/shared/ui"; +import Alerts from "../alerts"; + +// ─── Mock navigation ───────────────────────────────────────────────────────── + +jest.mock("next/navigation", () => ({ + useRouter: jest.fn(), + useSearchParams: jest.fn(), +})); + +// ─── Mock data hooks ───────────────────────────────────────────────────────── + +jest.mock("@/utils/hooks/useProviders", () => ({ + useProviders: jest.fn(), +})); + +jest.mock("@/entities/presets/model", () => ({ + usePresets: jest.fn(), +})); + +jest.mock("@/widgets/alerts-table/ui/useAlertsTableData", () => ({ + useAlertsTableData: jest.fn(), +})); + +// ─── Mock UI utilities ─────────────────────────────────────────────────────── + +jest.mock("@/shared/ui", () => ({ + showErrorToast: jest.fn(), + KeepLoader: () => null, +})); + +// ─── Mock all heavy child components ──────────────────────────────────────── +// Only ViewAlertModal and EnrichAlertSidePanel render observable testid +// attributes so we can assert that the fix works. + +jest.mock("../alert-table-tab-panel-server-side", () => ({ + __esModule: true, + default: () =>
, +})); + +jest.mock("@/features/alerts/alert-history", () => ({ + AlertHistoryModal: () => null, +})); + +jest.mock("@/features/alerts/alert-assign-ticket", () => ({ + AlertAssignTicketModal: () => null, +})); + +jest.mock("@/features/alerts/alert-note", () => ({ + AlertNoteModal: () => null, +})); + +jest.mock("@/features/alerts/alert-call-provider-method", () => ({ + AlertMethodModal: () => null, +})); + +jest.mock("@/features/workflows/manual-run-workflow", () => ({ + ManualRunWorkflowModal: () => null, +})); + +jest.mock("@/features/alerts/dismiss-alert", () => ({ + AlertDismissModal: () => null, +})); + +jest.mock("@/features/alerts/view-raw-alert", () => ({ + // Renders a testid only when an alert is supplied so tests can assert on it. + ViewAlertModal: ({ alert }: any) => + alert ?
: null, +})); + +jest.mock("@/features/alerts/alert-change-status", () => ({ + AlertChangeStatusModal: () => null, +})); + +jest.mock("@/features/alerts/enrich-alert", () => ({ + EnrichAlertSidePanel: ({ isOpen }: any) => + isOpen ?
: null, +})); + +jest.mock("@/app/(keep)/not-found", () => ({ + __esModule: true, + default: () =>
Not Found
, +})); + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +const makeAlert = (fingerprint: string) => ({ + id: fingerprint, + fingerprint, + name: `Alert ${fingerprint}`, + description: "", + severity: "critical", + status: "firing", + source: ["prometheus"], + lastReceived: new Date(), + environment: "production", + pushed: false, + deleted: false, + dismissed: false, + enriched_fields: [], +}); + +const baseAlertsData = { + alerts: [] as ReturnType[], + alertsLoading: false, + mutateAlerts: jest.fn(), + alertsError: null, + totalCount: 0, + facetsCel: null, + facetsPanelRefreshToken: null, +}; + +const mockReplace = jest.fn(); +const mockSearchParamsGet = jest.fn(); + +// ─── Global setup ──────────────────────────────────────────────────────────── + +beforeEach(() => { + jest.clearAllMocks(); + + (useRouter as jest.Mock).mockReturnValue({ + replace: mockReplace, + push: jest.fn(), + back: jest.fn(), + }); + + // Return an object with a controllable .get() so each test can set params. + (useSearchParams as jest.Mock).mockReturnValue({ + get: mockSearchParamsGet, + }); + // Default: no query params. + mockSearchParamsGet.mockReturnValue(null); + + (useProviders as jest.Mock).mockReturnValue({ + data: { installed_providers: [] }, + }); + + // Return empty saved presets; "feed" comes from defaultPresets inside the + // component so selectedPreset will be found without any extra setup. 
+ (usePresets as jest.Mock).mockReturnValue({ + dynamicPresets: [], + isLoading: false, + }); + + (useAlertsTableData as jest.Mock).mockReturnValue(baseAlertsData); +}); + +// ─── Tests ─────────────────────────────────────────────────────────────────── + +describe("Alerts — fingerprint modal fix (dataSettled guard)", () => { + it("does NOT fire error when alerts is briefly empty but totalCount > 0 (stale-empty SWR flash)", async () => { + // Regression test for the 3-render cascade in useLastAlerts: + // SWR marks isLoading=false before the React state carrying the real results + // has been flushed. For one render, alerts=[] while totalCount is already + // the real count (>0). The fix: only act when alerts.length>0 OR totalCount===0. + + const alert = makeAlert("fp-stale"); + + mockSearchParamsGet.mockImplementation((key: string) => + key === "alertPayloadFingerprint" ? "fp-stale" : null + ); + + // Phase 1 — stale-empty flash: alerts=[], alertsLoading=false, totalCount=5 + (useAlertsTableData as jest.Mock).mockReturnValue({ + ...baseAlertsData, + alerts: [], + alertsLoading: false, + totalCount: 5, + }); + + const { rerender } = render( + + ); + + // No error should fire during the stale-empty phase. + await waitFor(() => { + expect(showErrorToast).not.toHaveBeenCalled(); + }); + + // Phase 2 — real data arrives: alerts=[alert], totalCount=1 + (useAlertsTableData as jest.Mock).mockReturnValue({ + ...baseAlertsData, + alerts: [alert], + alertsLoading: false, + totalCount: 1, + }); + + await act(async () => { + rerender(); + }); + + // Modal should open and still no error. 
+ expect(showErrorToast).not.toHaveBeenCalled(); + expect(mockReplace).not.toHaveBeenCalled(); + }); +}); + +describe("Alerts — fingerprint modal fix (resolvedFingerprintRef)", () => { + it("opens view modal when fingerprint is found and shows no error", async () => { + const alert = makeAlert("fp-abc"); + + mockSearchParamsGet.mockImplementation((key: string) => + key === "alertPayloadFingerprint" ? "fp-abc" : null + ); + + (useAlertsTableData as jest.Mock).mockReturnValue({ + ...baseAlertsData, + alerts: [alert], + }); + + const { findByTestId } = render( + + ); + + // Modal should appear. + await findByTestId("view-alert-modal"); + expect(showErrorToast).not.toHaveBeenCalled(); + }); + + it("shows error toast when fingerprint is not in the alerts list", async () => { + mockSearchParamsGet.mockImplementation((key: string) => + key === "alertPayloadFingerprint" ? "fp-missing" : null + ); + + // Alerts list present but does not contain the requested fingerprint. + (useAlertsTableData as jest.Mock).mockReturnValue({ + ...baseAlertsData, + alerts: [makeAlert("fp-other")], + }); + + render(); + + await waitFor(() => { + expect(showErrorToast).toHaveBeenCalledWith( + null, + "Alert fingerprint not found" + ); + }); + + // URL should have been cleared. + expect(mockReplace).toHaveBeenCalled(); + }); + + it("does NOT show error toast on background re-fetch after fingerprint was resolved", async () => { + // Core regression test: after a successful modal open, the alerts list + // briefly empties (due to a polling re-fetch), then repopulates. + // Without the fix, the empty-list evaluation fires the error toast. + + const alert = makeAlert("fp-abc"); + + mockSearchParamsGet.mockImplementation((key: string) => + key === "alertPayloadFingerprint" ? "fp-abc" : null + ); + + // Step 1 — alert is present; modal opens and ref is stored. 
+ (useAlertsTableData as jest.Mock).mockReturnValue({ + ...baseAlertsData, + alerts: [alert], + }); + + const { rerender } = render( + + ); + + await waitFor(() => { + expect(showErrorToast).not.toHaveBeenCalled(); + }); + + // Step 2 — alerts list empties mid-refetch. + (useAlertsTableData as jest.Mock).mockReturnValue({ + ...baseAlertsData, + alerts: [], + }); + + await act(async () => { + rerender(); + }); + + // The fix: resolvedFingerprintRef is still "fp-abc" so the error path is + // skipped. + expect(showErrorToast).not.toHaveBeenCalled(); + expect(mockReplace).not.toHaveBeenCalled(); + }); + + it("opens enrich sidebar when both fingerprint and enrich params are present", async () => { + const alert = makeAlert("fp-enrich"); + + mockSearchParamsGet.mockImplementation((key: string) => { + if (key === "alertPayloadFingerprint") return "fp-enrich"; + if (key === "enrich") return "true"; + return null; + }); + + (useAlertsTableData as jest.Mock).mockReturnValue({ + ...baseAlertsData, + alerts: [alert], + }); + + const { findByTestId } = render( + + ); + + await findByTestId("enrich-sidebar"); + expect(showErrorToast).not.toHaveBeenCalled(); + }); + + it("resets the ref and opens modal correctly when navigating to a different fingerprint", async () => { + // Ensure that navigating from fp-1 to fp-2 does NOT inherit fp-1's ref + // and still opens fp-2's modal without errors. + + const alert1 = makeAlert("fp-1"); + const alert2 = makeAlert("fp-2"); + + mockSearchParamsGet.mockImplementation((key: string) => + key === "alertPayloadFingerprint" ? "fp-1" : null + ); + + (useAlertsTableData as jest.Mock).mockReturnValue({ + ...baseAlertsData, + alerts: [alert1, alert2], + }); + + const { rerender } = render( + + ); + + // First fingerprint resolved — no errors. + await waitFor(() => { + expect(showErrorToast).not.toHaveBeenCalled(); + }); + + // Navigate to a different fingerprint. 
+ mockSearchParamsGet.mockImplementation((key: string) => + key === "alertPayloadFingerprint" ? "fp-2" : null + ); + + (showErrorToast as jest.Mock).mockClear(); + + await act(async () => { + rerender(); + }); + + // fp-2 is present in the list, so the modal should open without error. + expect(showErrorToast).not.toHaveBeenCalled(); + }); +}); diff --git a/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-alert-facets.tsx b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-alert-facets.tsx new file mode 100644 index 0000000000..640783324f --- /dev/null +++ b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-alert-facets.tsx @@ -0,0 +1,290 @@ +import React, { useCallback } from "react"; +import { AlertFacetsProps, FacetValue } from "./alert-table-facet-types"; +import { Facet } from "./alert-table-facet"; +import { + getFilteredAlertsForFacet, + getSeverityOrder, +} from "./alert-table-facet-utils"; +import { useLocalStorage } from "@/utils/hooks/useLocalStorage"; +import { AlertDto } from "@/entities/alerts/model"; +import { + DynamicFacetWrapper, + AddFacetModal, +} from "./alert-table-facet-dynamic"; +import { PlusIcon } from "@heroicons/react/24/outline"; +import { usePathname } from "next/navigation"; + +export const AlertFacets: React.FC = ({ + alerts, + facetFilters, + setFacetFilters, + dynamicFacets, + setDynamicFacets, + onDelete, + className, + table, + showSkeleton, +}) => { + const pathname = usePathname(); + const timeRangeFilter = table + .getState() + .columnFilters.find((filter) => filter.id === "lastReceived"); + + const timeRange = timeRangeFilter?.value as + | { start: Date; end: Date; isFromCalendar: boolean } + | undefined; + + const presetName = pathname?.split("/").pop() || "default"; + + const [isModalOpen, setIsModalOpen] = useLocalStorage( + `addFacetModalOpen-${presetName}`, + false + ); + + const handleSelect = ( + facetKey: string, + value: string, + exclusive: boolean, + isAllOnly: boolean + ) => { + const newFilters = { ...facetFilters }; + + if 
(isAllOnly) { + if (exclusive) { + newFilters[facetKey] = [value]; + } else { + delete newFilters[facetKey]; + } + } else { + if (exclusive) { + newFilters[facetKey] = [value]; + } else { + const currentValues = newFilters[facetKey] || []; + if (currentValues.includes(value)) { + newFilters[facetKey] = currentValues.filter((v) => v !== value); + if (newFilters[facetKey].length === 0) { + delete newFilters[facetKey]; + } + } else { + newFilters[facetKey] = [...currentValues, value]; + } + } + } + + setFacetFilters(newFilters); + }; + + const getFacetValues = useCallback( + (key: keyof AlertDto | string): FacetValue[] => { + const filteredAlerts = getFilteredAlertsForFacet( + alerts, + facetFilters, + key, + timeRange + ); + const valueMap = new Map(); + let nullCount = 0; + + filteredAlerts.forEach((alert) => { + let value; + + // Handle nested keys like "labels.host" + if (typeof key === "string" && key.includes(".")) { + const [parentKey, childKey] = key.split("."); + const parentValue = alert[parentKey as keyof AlertDto]; + + if ( + typeof parentValue === "object" && + parentValue !== null && + !Array.isArray(parentValue) && + !(parentValue instanceof Date) + ) { + value = (parentValue as Record)[childKey]; + } else { + value = undefined; + } + } else { + value = alert[key as keyof AlertDto]; + } + + if (Array.isArray(value)) { + if (value.length === 0) { + nullCount++; + } else { + value.forEach((v) => { + valueMap.set(v, (valueMap.get(v) || 0) + 1); + }); + } + } else if (value !== undefined && value !== null) { + const strValue = String(value); + valueMap.set(strValue, (valueMap.get(strValue) || 0) + 1); + } else { + nullCount++; + } + }); + + let values = Array.from(valueMap.entries()).map(([label, count]) => ({ + label, + count, + isSelected: + facetFilters[key]?.includes(label) || !facetFilters[key]?.length, + })); + + if (["assignee", "incident"].includes(key as string) && nullCount > 0) { + values.push({ + label: "n/a", + count: nullCount, + isSelected: + 
facetFilters[key]?.includes("n/a") || !facetFilters[key]?.length, + }); + } + + if (key === "severity") { + values.sort((a, b) => { + if (a.label === "n/a") return 1; + if (b.label === "n/a") return -1; + const orderDiff = + getSeverityOrder(b.label) - getSeverityOrder(a.label); + if (orderDiff !== 0) return orderDiff; + return b.count - a.count; + }); + } else { + values.sort((a, b) => { + if (a.label === "n/a") return 1; + if (b.label === "n/a") return -1; + return b.count - a.count; + }); + } + + return values; + }, + [alerts, facetFilters, timeRange] + ); + + const staticFacets = [ + "severity", + "status", + "source", + "assignee", + "dismissed", + "incident", + ]; + + const handleAddFacet = (column: string) => { + setDynamicFacets([ + ...dynamicFacets, + { + key: column, + name: column.charAt(0).toUpperCase() + column.slice(1), + }, + ]); + }; + + const handleDeleteFacet = (facetKey: string) => { + setDynamicFacets(dynamicFacets.filter((df) => df.key !== facetKey)); + const newFilters = { ...facetFilters }; + delete newFilters[facetKey]; + setFacetFilters(newFilters); + }; + + return ( +
+
+ {/* Facet button */} + + + handleSelect("severity", value, exclusive, isAllOnly) + } + facetKey="severity" + facetFilters={facetFilters} + showSkeleton={showSkeleton} + /> + + handleSelect("status", value, exclusive, isAllOnly) + } + facetKey="status" + facetFilters={facetFilters} + showSkeleton={showSkeleton} + /> + + handleSelect("source", value, exclusive, isAllOnly) + } + facetKey="source" + facetFilters={facetFilters} + showSkeleton={showSkeleton} + /> + + handleSelect("assignee", value, exclusive, isAllOnly) + } + facetKey="assignee" + facetFilters={facetFilters} + showSkeleton={showSkeleton} + /> + + handleSelect("dismissed", value, exclusive, isAllOnly) + } + facetKey="dismissed" + facetFilters={facetFilters} + showSkeleton={showSkeleton} + /> + + handleSelect("incident", value, exclusive, isAllOnly) + } + facetFilters={facetFilters} + showSkeleton={showSkeleton} + /> + {/* Dynamic facets */} + {dynamicFacets.map((facet) => ( + + handleSelect(facet.key, value, exclusive, isAllOnly) + } + facetKey={facet.key} + facetFilters={facetFilters} + onDelete={() => handleDeleteFacet(facet.key)} + /> + ))} + + {/* Facet Modal */} + setIsModalOpen(false)} + table={table} + onAddFacet={handleAddFacet} + existingFacets={[ + ...staticFacets, + ...dynamicFacets.map((df) => df.key), + ]} + /> +
+
+ ); +}; diff --git a/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-dynamic.tsx b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-dynamic.tsx new file mode 100644 index 0000000000..f45bb129fb --- /dev/null +++ b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-dynamic.tsx @@ -0,0 +1,93 @@ +import React, { useState } from "react"; +import { TextInput } from "@tremor/react"; +import { TrashIcon } from "@heroicons/react/24/outline"; +import { FacetProps } from "./alert-table-facet-types"; +import { AlertDto } from "@/entities/alerts/model"; +import { Facet } from "./alert-table-facet"; +import Modal from "@/components/ui/Modal"; +import { Table } from "@tanstack/table-core"; +import { FiSearch } from "react-icons/fi"; + +interface AddFacetModalProps { + isOpen: boolean; + onClose: () => void; + table: Table; + onAddFacet: (column: string) => void; + existingFacets: string[]; +} + +export const AddFacetModal: React.FC = ({ + isOpen, + onClose, + table, + onAddFacet, + existingFacets, +}) => { + const [searchTerm, setSearchTerm] = useState(""); + + const availableColumns = table + .getAllColumns() + .filter( + (col) => + // Filter out pinned columns and existing facets + !col.getIsPinned() && + !existingFacets.includes(col.id) && + // Filter by search term + col.id.toLowerCase().includes(searchTerm.toLowerCase()) + ) + .map((col) => col.id); + + return ( + +
+ setSearchTerm(e.target.value)} + className="mb-4" + /> +
+ {availableColumns.map((column) => ( + + ))} +
+
+
+ ); +}; + +export interface DynamicFacetProps extends FacetProps { + onDelete: () => void; +} + +export const DynamicFacetWrapper: React.FC = ({ + onDelete, + ...facetProps +}) => { + return ( +
+ + +
+ ); +}; diff --git a/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-types.tsx b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-types.tsx new file mode 100644 index 0000000000..bc0b209665 --- /dev/null +++ b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-types.tsx @@ -0,0 +1,53 @@ +import { AlertDto } from "@/entities/alerts/model"; +import { Table } from "@tanstack/table-core"; + +export interface DynamicFacet { + key: string; + name: string; +} + +export interface FacetValue { + label: string; + count: number; + isSelected: boolean; +} + +export interface FacetFilters { + [key: string]: string[]; +} + +export interface FacetValueProps { + label: string; + count: number; + isSelected: boolean; + onSelect: (value: string, exclusive: boolean, isAllOnly: boolean) => void; + facetKey: string; + showIcon?: boolean; + facetFilters: FacetFilters; +} + +export interface FacetProps { + name: string; + values: FacetValue[]; + onSelect: (value: string, exclusive: boolean, isAllOnly: boolean) => void; + facetKey: string; + facetFilters: FacetFilters; + showIcon?: boolean; + showSkeleton?: boolean; +} + +export interface AlertFacetsProps { + alerts: AlertDto[]; + facetFilters: FacetFilters; + setFacetFilters: ( + filters: FacetFilters | ((filters: FacetFilters) => FacetFilters) + ) => void; + dynamicFacets: DynamicFacet[]; + setDynamicFacets: ( + facets: DynamicFacet[] | ((facets: DynamicFacet[]) => DynamicFacet[]) + ) => void; + onDelete: (facetKey: string) => void; + className?: string; + table: Table; + showSkeleton?: boolean; +} diff --git a/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-utils.tsx b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-utils.tsx new file mode 100644 index 0000000000..41a5c87ef8 --- /dev/null +++ b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-utils.tsx @@ -0,0 +1,91 @@ +import { FacetFilters } from "./alert-table-facet-types"; +import { AlertDto } from "@/entities/alerts/model"; +import { isQuickPresetRange } from 
"@/components/ui/DateRangePicker"; + +export const getFilteredAlertsForFacet = ( + alerts: AlertDto[], + facetFilters: FacetFilters, + currentFacetKey: string, + timeRange?: { start: Date; end: Date; isFromCalendar: boolean } +) => { + return alerts.filter((alert) => { + // Only apply time range filter if both start and end dates exist + if (timeRange?.start && timeRange?.end) { + const lastReceived = new Date(alert.lastReceived); + const rangeStart = new Date(timeRange.start); + const rangeEnd = new Date(timeRange.end); + + if (!isQuickPresetRange(timeRange)) { + rangeEnd.setHours(23, 59, 59, 999); + } + + if (lastReceived < rangeStart || lastReceived > rangeEnd) { + return false; + } + } + + // Then apply facet filters, excluding the current facet + return Object.entries(facetFilters).every(([facetKey, includedValues]) => { + // Skip filtering by current facet to avoid circular dependency + if (facetKey === currentFacetKey || includedValues.length === 0) { + return true; + } + + let value; + if (facetKey.includes(".")) { + const [parentKey, childKey] = facetKey.split("."); + const parentValue = alert[parentKey as keyof AlertDto]; + + if ( + typeof parentValue === "object" && + parentValue !== null && + !Array.isArray(parentValue) && + !(parentValue instanceof Date) + ) { + value = (parentValue as Record)[childKey]; + } + } else { + value = alert[facetKey as keyof AlertDto]; + } + + if (facetKey === "source") { + const sources = value as string[]; + if (includedValues.includes("n/a")) { + return !sources || sources.length === 0; + } + return ( + Array.isArray(sources) && + sources.some((source) => includedValues.includes(source)) + ); + } + + if (includedValues.includes("n/a")) { + return value === null || value === undefined || value === ""; + } + + if (value === null || value === undefined || value === "") { + return false; + } + + return includedValues.includes(String(value)); + }); + }); +}; + +export const getSeverityOrder = (severity: string): number => { + 
switch (severity) { + case "low": + return 1; + case "info": + return 2; + case "warning": + return 3; + case "error": + case "high": + return 4; + case "critical": + return 5; + default: + return 6; + } +}; diff --git a/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-value.tsx b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-value.tsx new file mode 100644 index 0000000000..b1cdd43b3a --- /dev/null +++ b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet-value.tsx @@ -0,0 +1,206 @@ +import React, { useCallback, useMemo } from "react"; +import { Icon } from "@tremor/react"; +import { Text } from "@tremor/react"; +import { FacetValueProps } from "./alert-table-facet-types"; +import { getStatusIcon, getStatusColor } from "@/shared/lib/status-utils"; +import { BellIcon, BellSlashIcon, FireIcon } from "@heroicons/react/24/outline"; +import clsx from "clsx"; +import { useIncidents } from "@/utils/hooks/useIncidents"; +import { getIncidentName } from "@/entities/incidents/lib/utils"; +import { UserStatefulAvatar } from "@/entities/users/ui"; +import { useUser } from "@/entities/users/model/useUser"; +import { SeverityBorderIcon, UISeverity } from "@/shared/ui"; +import { DynamicImageProviderIcon } from "@/components/ui"; + +const AssigneeLabel = ({ email }: { email: string }) => { + const user = useUser(email); + return user ? user.name : email; +}; + +export const FacetValue: React.FC = ({ + label, + count, + isSelected, + onSelect, + facetKey, + showIcon = false, + facetFilters, +}) => { + const { data: incidents } = useIncidents( + { + candidate: false, + predicted: null, + limit: 100, + offset: undefined, + sorting: undefined, + cel: "", + }, + { + revalidateOnFocus: false, + } + ); + + const incidentMap = useMemo(() => { + return new Map( + incidents?.items.map((incident) => [ + incident.id.replaceAll("-", ""), + incident, + ]) || [] + ); + }, [incidents]); + + const incident = useMemo( + () => (facetKey === "incident" ? 
incidentMap.get(label) : null), + [incidentMap, facetKey, label] + ); + + const handleCheckboxClick = (e: React.MouseEvent) => { + e.stopPropagation(); + onSelect(label, false, false); + }; + + const isExclusivelySelected = () => { + const currentFilter = facetFilters[facetKey] || []; + return currentFilter.length === 1 && currentFilter[0] === label; + }; + + const handleActionClick = (e: React.MouseEvent) => { + e.stopPropagation(); + if (isExclusivelySelected()) { + onSelect("", false, true); + } else { + onSelect(label, true, true); + } + }; + + const getValueIcon = useCallback( + (label: string, facetKey: string) => { + if (facetKey === "source") { + return ( + + ); + } + if (facetKey === "severity") { + return ; + } + if (facetKey === "assignee") { + return ; + } + if (facetKey === "status") { + return ( + + ); + } + if (facetKey === "dismissed") { + return ( + + ); + } + if (facetKey === "incident") { + if (incident) { + return ( + + ); + } + return ( + + ); + } + return null; + }, + [incident] + ); + + const humanizeLabel = useCallback( + (label: string, facetKey: string) => { + if (facetKey === "assignee") { + if (label === "n/a") { + return "Not assigned"; + } + return ; + } + if (facetKey === "incident") { + if (label === "n/a") { + return "No incident"; + } + if (incident) { + return getIncidentName(incident); + } else { + return label; + } + } + if (facetKey === "dismissed") { + return label === "true" ? "Dismissed" : "Not dismissed"; + } + return {label}; + }, + [incident] + ); + + const currentFilter = facetFilters[facetKey] || []; + const isValueSelected = + !currentFilter?.length || currentFilter.includes(label); + + return ( +
+
+ {}} + style={{ accentColor: "#eb6221" }} + className="h-4 w-4 rounded border-gray-300 cursor-pointer" + /> +
+ +
+ {showIcon && ( +
+ {getValueIcon(label, facetKey)} +
+ )} + {humanizeLabel(label, facetKey)} +
+ +
+ + {count > 0 && ( + + {count} + + )} +
+
+ ); +}; diff --git a/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet.tsx b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet.tsx new file mode 100644 index 0000000000..4b5d2d68ec --- /dev/null +++ b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-facet.tsx @@ -0,0 +1,100 @@ +import React from "react"; +import { Title } from "@tremor/react"; +import { ChevronDownIcon, ChevronRightIcon } from "@heroicons/react/20/solid"; +import { FacetProps } from "./alert-table-facet-types"; +import { FacetValue } from "./alert-table-facet-value"; +import { useLocalStorage } from "@/utils/hooks/useLocalStorage"; +import { usePathname } from "next/navigation"; +import Skeleton from "react-loading-skeleton"; + +export const Facet: React.FC = ({ + name, + values, + onSelect, + facetKey, + facetFilters, + showIcon = true, + showSkeleton, +}) => { + const pathname = usePathname(); + // Get preset name from URL + const presetName = pathname?.split("/").pop() || "default"; + + // Store open/close state in localStorage with a unique key per preset and facet + const [isOpen, setIsOpen] = useLocalStorage( + `facet-${presetName}-${facetKey}-open`, + true + ); + + // Store filter value in localStorage per preset and facet + const [filter, setFilter] = useLocalStorage( + `facet-${presetName}-${facetKey}-filter`, + "" + ); + + const filteredValues = values.filter((v) => + v.label.toLowerCase().includes(filter.toLowerCase()) + ); + + const Icon = isOpen ? ChevronDownIcon : ChevronRightIcon; + + return ( +
+
setIsOpen(!isOpen)} + > +
+ + {name} +
+
+ + {isOpen && ( +
+ {values.length >= 10 && ( +
+ setFilter(e.target.value)} + className="w-full px-2 py-1 text-sm border border-gray-300 rounded" + /> +
+ )} +
+ {showSkeleton ? ( + Array.from({ length: 3 }).map((_, index) => ( +
+ + +
+ )) + ) : values.length > 0 ? ( + filteredValues.map((value) => ( + + )) + ) : ( +
+ No matching values found +
+ )} +
+
+ )} +
+ ); +}; diff --git a/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-tab-panel-server-side.tsx b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-tab-panel-server-side.tsx new file mode 100644 index 0000000000..94c3b726c6 --- /dev/null +++ b/keep-ui/app/(keep)/alerts/[id]/ui/alert-table-tab-panel-server-side.tsx @@ -0,0 +1,106 @@ +import { FacetDto } from "@/features/filter"; +import { AlertTableServerSide } from "@/widgets/alerts-table/ui/alert-table-server-side"; +import { useAlertTableCols } from "@/widgets/alerts-table/lib/alert-table-utils"; +import { + AlertDto, + AlertKnownKeys, + AlertsQuery, + getTabsFromPreset, +} from "@/entities/alerts/model"; +import { Preset } from "@/entities/presets/model/types"; +import { AlertsTableDataQuery } from "@/widgets/alerts-table/ui/useAlertsTableData"; + +interface Props { + initialFacets: FacetDto[]; + alerts: AlertDto[]; + alertsTotalCount: number; + facetsCel: string | null; + facetsPanelRefreshToken: string | undefined; + preset: Preset; + isAsyncLoading: boolean; + setTicketModalAlert: (alert: AlertDto | null) => void; + setNoteModalAlert: (alert: AlertDto | null) => void; + setRunWorkflowModalAlert: (alert: AlertDto | null) => void; + setDismissModalAlert: (alert: AlertDto[] | null) => void; + setChangeStatusAlert: (alert: AlertDto | null) => void; + mutateAlerts: () => void; + onReload?: (query: AlertsQuery) => void; + onQueryChange?: (query: AlertsTableDataQuery) => void; +} + +export default function AlertTableTabPanelServerSide({ + initialFacets, + alerts, + alertsTotalCount, + preset, + facetsCel, + facetsPanelRefreshToken, + isAsyncLoading, + setTicketModalAlert, + setNoteModalAlert, + setRunWorkflowModalAlert, + setDismissModalAlert, + setChangeStatusAlert, + mutateAlerts, + onReload, + onQueryChange, +}: Props) { + const additionalColsToGenerate = [ + ...new Set( + alerts?.flatMap((alert) => { + const keys = Object.keys(alert).filter( + (key) => !AlertKnownKeys.includes(key) + ); + return keys.flatMap((key) => 
{ + if ( + typeof alert[key as keyof AlertDto] === "object" && + alert[key as keyof AlertDto] !== null + ) { + return Object.keys(alert[key as keyof AlertDto] as object).map( + (subKey) => `${key}.${subKey}` + ); + } + return key; + }); + }) || [] + ), + ]; + + const alertTableColumns = useAlertTableCols({ + additionalColsToGenerate: additionalColsToGenerate, + isCheckboxDisplayed: + preset.name !== "deleted" && preset.name !== "dismissed", + isMenuDisplayed: true, + setTicketModalAlert: setTicketModalAlert, + setNoteModalAlert: setNoteModalAlert, + setRunWorkflowModalAlert: setRunWorkflowModalAlert, + setDismissModalAlert: setDismissModalAlert, + setChangeStatusAlert: setChangeStatusAlert, + presetName: preset.name, + presetNoisy: preset.is_noisy, + }); + + const presetTabs = getTabsFromPreset(preset); + + return ( + + ); +} diff --git a/keep-ui/app/(keep)/alerts/[id]/ui/alerts.tsx b/keep-ui/app/(keep)/alerts/[id]/ui/alerts.tsx new file mode 100644 index 0000000000..ed70b60635 --- /dev/null +++ b/keep-ui/app/(keep)/alerts/[id]/ui/alerts.tsx @@ -0,0 +1,263 @@ +"use client"; + +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; +import { useRouter, useSearchParams } from "next/navigation"; +import { type AlertDto, type AlertsQuery } from "@/entities/alerts/model"; +import { usePresets, type Preset } from "@/entities/presets/model"; +import { AlertHistoryModal } from "@/features/alerts/alert-history"; +import { AlertAssignTicketModal } from "@/features/alerts/alert-assign-ticket"; +import { AlertNoteModal } from "@/features/alerts/alert-note"; +import { AlertMethodModal } from "@/features/alerts/alert-call-provider-method"; +import { ManualRunWorkflowModal } from "@/features/workflows/manual-run-workflow"; +import { AlertDismissModal } from "@/features/alerts/dismiss-alert"; +import { ViewAlertModal } from "@/features/alerts/view-raw-alert"; +import { AlertChangeStatusModal } from "@/features/alerts/alert-change-status"; +import { 
EnrichAlertSidePanel } from "@/features/alerts/enrich-alert"; +import { FacetDto } from "@/features/filter"; +import { useApi } from "@/shared/lib/hooks/useApi"; +import { KeepLoader, showErrorToast } from "@/shared/ui"; +import NotFound from "@/app/(keep)/not-found"; +import AlertTableTabPanelServerSide from "./alert-table-tab-panel-server-side"; +import { useProviders } from "@/utils/hooks/useProviders"; +import { + useAlertsTableData, + AlertsTableDataQuery, +} from "@/widgets/alerts-table/ui/useAlertsTableData"; + +const defaultPresets: Preset[] = [ + { + id: "11111111-1111-1111-1111-111111111111", // FEED_PRESET_ID + name: "feed", + options: [], + is_private: false, + is_noisy: false, + alerts_count: 0, + should_do_noise_now: false, + tags: [], + counter_shows_firing_only: false, + }, +]; + +type AlertsProps = { + initialFacets: FacetDto[]; + presetName: string; +}; + +export default function Alerts({ presetName, initialFacets }: AlertsProps) { + const api = useApi(); + const [alertsQueryState, setAlertsQueryState] = useState< + AlertsQuery | undefined + >(); + const [alertsTableDataQuery, setAlertsTableDataQuery] = + useState(); + const { data: providersData = { installed_providers: [] } } = useProviders(); + const router = useRouter(); + + const ticketingProviders = useMemo( + () => + providersData.installed_providers.filter((provider) => + provider.tags.includes("ticketing") + ), + [providersData.installed_providers] + ); + + const searchParams = useSearchParams(); + // hooks for the note and ticket modals + const [noteModalAlert, setNoteModalAlert] = useState(); + const [ticketModalAlert, setTicketModalAlert] = useState(); + const [runWorkflowModalAlert, setRunWorkflowModalAlert] = + useState(); + const [dismissModalAlert, setDismissModalAlert] = useState< + AlertDto[] | null + >(); + const [changeStatusAlert, setChangeStatusAlert] = useState(); + const [viewAlertModal, setViewAlertModal] = useState(); + const [viewEnrichAlertModal, setEnrichAlertModal] = 
+ useState(); + const [isEnrichSidebarOpen, setIsEnrichSidebarOpen] = useState(false); + const { dynamicPresets: savedPresets = [], isLoading: _isPresetsLoading } = + usePresets({ + revalidateOnFocus: false, + }); + const isPresetsLoading = _isPresetsLoading || !api.isReady(); + const presets = [...defaultPresets, ...savedPresets] as const; + + const selectedPreset = presets.find( + (preset) => preset.name.toLowerCase() === decodeURIComponent(presetName) + ); + + const { + alerts, + alertsLoading, + mutateAlerts, + alertsError: alertsError, + totalCount, + facetsCel, + facetsPanelRefreshToken, + } = useAlertsTableData(alertsTableDataQuery); + + // Track which fingerprint has already been resolved so that a background + // alerts re-fetch (polling / WebSocket) doesn't fire "not found" after the + // modal was successfully opened. + const resolvedFingerprintRef = useRef(null); + + useEffect(() => { + const fingerprint = searchParams?.get("alertPayloadFingerprint"); + const enrich = searchParams?.get("enrich"); + + // Reset when the user navigates to a different fingerprint. + if (fingerprint !== resolvedFingerprintRef.current) { + resolvedFingerprintRef.current = null; + } + + // Only act once data is actually settled: either we have alerts to search + // through, or the backend confirmed there are zero results (totalCount === 0). + // This guards against a 3-render cascade in useLastAlerts where `alerts` + // briefly equals [] while `isLoading` is already false but the React state + // carrying the actual results hasn't been flushed yet. 
+ const dataSettled = alerts && !alertsLoading && (alerts.length > 0 || totalCount === 0); + + if (fingerprint && enrich && dataSettled) { + const alert = alerts?.find((alert) => alert.fingerprint === fingerprint); + if (alert) { + resolvedFingerprintRef.current = fingerprint; + setEnrichAlertModal(alert); + setIsEnrichSidebarOpen(true); + } else if (!resolvedFingerprintRef.current) { + showErrorToast(null, "Alert fingerprint not found"); + resetUrlAfterModal(); + } + } else if (fingerprint && dataSettled) { + const alert = alerts?.find((alert) => alert.fingerprint === fingerprint); + if (alert) { + resolvedFingerprintRef.current = fingerprint; + setViewAlertModal(alert); + } else if (!resolvedFingerprintRef.current) { + showErrorToast(null, "Alert fingerprint not found"); + resetUrlAfterModal(); + } + } else if (alerts && !alertsLoading) { + resolvedFingerprintRef.current = null; + setViewAlertModal(null); + setEnrichAlertModal(null); + setIsEnrichSidebarOpen(false); + } + }, [searchParams, alerts, alertsLoading, totalCount]); + + const alertsQueryStateRef = useRef(alertsQueryState); + + const reloadAlerts = useCallback( + (alertsQuery: AlertsQuery) => { + // if the query is the same as the last one, just refetch + if ( + JSON.stringify(alertsQuery) === + JSON.stringify(alertsQueryStateRef.current) + ) { + mutateAlerts(); + return; + } + + // if the query is different, update the state + setAlertsQueryState(alertsQuery); + alertsQueryStateRef.current = alertsQuery; + }, + [setAlertsQueryState] + ); + + const resetUrlAfterModal = useCallback(() => { + const currentParams = new URLSearchParams(window.location.search); + Array.from(currentParams.keys()) + .filter((paramKey) => paramKey !== "cel") + .forEach((paramKey) => currentParams.delete(paramKey)); + let url = `${window.location.pathname}`; + + if (currentParams.toString()) { + url += `?${currentParams.toString()}`; + } + + router.replace(url); + }, [router]); + + // if we don't have presets data yet, just show 
loading + if (!selectedPreset && isPresetsLoading) { + return ; + } + + // if we have an error, throw it, error.tsx will catch it + if (alertsError) { + throw alertsError; + } + + if (!selectedPreset) { + return ; + } + + return ( + <> + + + setDismissModalAlert(null)} + /> + setChangeStatusAlert(null)} + /> + + setTicketModalAlert(null)} + ticketingProviders={ticketingProviders} + alert={ticketModalAlert ?? null} + /> + setNoteModalAlert(null)} + alert={noteModalAlert ?? null} + /> + setRunWorkflowModalAlert(null)} + /> + resetUrlAfterModal()} + mutate={mutateAlerts} + /> + { + setIsEnrichSidebarOpen(false); + resetUrlAfterModal(); + }} + mutate={mutateAlerts} + /> + + ); +} diff --git a/keep-ui/app/(keep)/dashboard/GridItem.tsx b/keep-ui/app/(keep)/dashboard/GridItem.tsx new file mode 100644 index 0000000000..86122e3fa2 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/GridItem.tsx @@ -0,0 +1,56 @@ +import React, { useState } from "react"; +import { Card } from "@tremor/react"; +import MenuButton from "./MenuButton"; +import { WidgetData } from "./types"; +import PresetGridItem from "./widget-types/preset/preset-grid-item"; +import MetricGridItem from "./widget-types/metric/metric-grid-item"; +import GenericMetricsGridItem from "./widget-types/generic-metrics/generic-metrics-grid-item"; + +interface GridItemProps { + item: WidgetData; + onEdit: (id: string, updateData?: WidgetData) => void; + onDelete: (id: string) => void; + onSave: (updateItem: WidgetData) => void; +} + +const GridItem: React.FC = ({ + item, + onEdit, + onDelete, + onSave, +}) => { + const [updatedItem, setUpdatedItem] = useState(item); + + const handleEdit = () => { + onEdit(updatedItem.i, updatedItem); + }; + + return ( + +
+
+ + {item.name} + + onDelete(item.i)} + onSave={() => { + onSave(updatedItem); + }} + /> +
+ {item.preset && } + {item.metric && } + {item.genericMetrics && ( + + )} +
+
+ ); +}; + +export default GridItem; diff --git a/keep-ui/app/(keep)/dashboard/GridItemContainer.tsx b/keep-ui/app/(keep)/dashboard/GridItemContainer.tsx new file mode 100644 index 0000000000..0f0d2d5046 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/GridItemContainer.tsx @@ -0,0 +1,28 @@ +import React from "react"; +import GridItem from "./GridItem"; +import { WidgetData } from "./types"; + +interface GridItemContainerProps { + item: WidgetData; + onEdit: (id: string) => void; + onDelete: (id: string) => void; + onSave: (updateItem: WidgetData) => void; +} + +const GridItemContainer: React.FC = ({ + item, + onEdit, + onDelete, + onSave, +}) => { + return ( + onEdit(item.i)} + onDelete={() => onDelete(item.i)} + onSave={onSave} + /> + ); +}; + +export default GridItemContainer; diff --git a/keep-ui/app/(keep)/dashboard/GridLayout.tsx b/keep-ui/app/(keep)/dashboard/GridLayout.tsx new file mode 100644 index 0000000000..a5a39a7072 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/GridLayout.tsx @@ -0,0 +1,89 @@ +import React from "react"; +import { Responsive, WidthProvider, Layout } from "react-grid-layout"; +import GridItemContainer from "./GridItemContainer"; +import { LayoutItem, WidgetData } from "./types"; +import "react-grid-layout/css/styles.css"; +import { MetricsWidget } from "@/utils/hooks/useDashboardMetricWidgets"; +import { Preset } from "@/entities/presets/model/types"; + +const ResponsiveGridLayout = WidthProvider(Responsive); + +interface GridLayoutProps { + layout: LayoutItem[]; + onLayoutChange: (layout: LayoutItem[]) => void; + data: WidgetData[]; + onEdit: (id: string) => void; + onDelete: (id: string) => void; + presets: Preset[]; + onSave: (updateItem: WidgetData) => void; + metrics: MetricsWidget[]; +} + +const GridLayout: React.FC = ({ + layout, + onLayoutChange, + data, + onEdit, + onDelete, + onSave, + presets, + metrics, +}) => { + const layouts = { lg: layout }; + + return ( + <> + { + const updatedLayout = currentLayout.map((item) => ({ 
+ ...item, + static: item.static ?? false, // Ensure static is a boolean + })); + onLayoutChange(updatedLayout as LayoutItem[]); + }} + breakpoints={{ lg: 1200, md: 996, sm: 768, xs: 480, xxs: 0 }} + cols={{ lg: 24, md: 20, sm: 12, xs: 8, xxs: 4 }} + rowHeight={30} + containerPadding={[0, 0]} + margin={[10, 10]} + useCSSTransforms={true} + isDraggable={true} + isResizable={true} + compactType={null} + draggableHandle=".grid-item__widget" + transformScale={1} + > + {data.map((item) => { + //Updating the static hardcode db value. + if (item.preset) { + const preset = presets?.find((p) => p?.id === item?.preset?.id); + item.preset = { + ...item.preset, + alerts_count: preset?.alerts_count ?? 0, + }; + } else if (item.metric) { + const metric = metrics?.find((m) => m?.id === item?.metric?.id); + if (metric) { + item.metric = { ...metric }; + } + } + return ( +
+ +
+ ); + })} +
+ + ); +}; + +export default GridLayout; diff --git a/keep-ui/app/(keep)/dashboard/MenuButton.tsx b/keep-ui/app/(keep)/dashboard/MenuButton.tsx new file mode 100644 index 0000000000..6ee7302ca6 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/MenuButton.tsx @@ -0,0 +1,108 @@ +import React, { Fragment } from "react"; +import { Menu, Transition } from "@headlessui/react"; +import { Icon } from "@tremor/react"; +import { PencilIcon, TrashIcon } from "@heroicons/react/24/outline"; +import { Bars3Icon } from "@heroicons/react/20/solid"; +import { FiSave } from "react-icons/fi"; + +interface MenuButtonProps { + onEdit: () => void; + onDelete: () => void; + onSave?: () => void; +} + +const MenuButton: React.FC = ({ + onEdit, + onDelete, + onSave, +}) => { + const stopPropagation = (e: React.MouseEvent) => { + e.stopPropagation(); + }; + + return ( +
+ +
+ + + +
+ + +
+ + {({ active }) => ( + + )} + + + {({ active }) => ( + + )} + + {onSave && ( + + {({ active }) => ( + + )} + + )} +
+
+
+
+
+ ); +}; + +export default MenuButton; diff --git a/keep-ui/app/(keep)/dashboard/WidgetModal.tsx b/keep-ui/app/(keep)/dashboard/WidgetModal.tsx new file mode 100644 index 0000000000..764bcbc232 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/WidgetModal.tsx @@ -0,0 +1,183 @@ +import React, { useState } from "react"; +import Modal from "@/components/ui/Modal"; +import { Button, Select, SelectItem, Subtitle, TextInput } from "@tremor/react"; +import { WidgetData, WidgetType } from "./types"; +import { Controller, get, useForm, useWatch } from "react-hook-form"; +import { MetricsWidget } from "@/utils/hooks/useDashboardMetricWidgets"; +import { Preset } from "@/entities/presets/model/types"; +import { PresetWidgetForm } from "./widget-types/preset/preset-widget-form"; +import { MetricWidgetForm } from "./widget-types/metric/metric-widget-form"; +import { GenericMetricsWidgetForm } from "./widget-types/generic-metrics/generic-metrics-widget-form"; + +interface WidgetForm { + widgetName: string; + widgetType: WidgetType; +} + +interface WidgetModalProps { + isOpen: boolean; + onClose: () => void; + onAddWidget: (widget: any) => void; + onEditWidget: (updatedWidget: WidgetData) => void; + presets: Preset[]; + editingItem?: WidgetData | null; + metricWidgets: MetricsWidget[]; +} + +const WidgetModal: React.FC = ({ + isOpen, + onClose, + onAddWidget, + onEditWidget, + presets, + editingItem, + metricWidgets, +}) => { + const [innerFormState, setInnerFormState] = useState<{ + isValid: boolean; + formValue: any; + }>({ isValid: false, formValue: {} }); + + const { + control, + handleSubmit, + formState: { errors, isValid }, + reset, + } = useForm({ + defaultValues: { + widgetName: editingItem?.name || "", + widgetType: editingItem?.widgetType || WidgetType.PRESET, + }, + }); + + const widgetType = useWatch({ + control, + name: "widgetType", + }); + + const onSubmit = (data: WidgetForm) => { + if (editingItem) { + let updatedWidget: WidgetData = { + ...editingItem, + name: 
data.widgetName, + widgetType: data.widgetType || WidgetType.PRESET, // backwards compatibility + ...innerFormState.formValue, + }; + onEditWidget(updatedWidget); + } else { + onAddWidget({ + name: data.widgetName, + widgetType: data.widgetType || WidgetType.PRESET, // backwards compatibility + ...innerFormState.formValue, + }); + // cleanup form + reset({ + widgetName: "", + widgetType: WidgetType.PRESET, + }); + } + onClose(); + }; + + return ( + +
+
+ Widget Name + ( + + )} + /> +
+
+ Widget Type + { + return ( + + ); + }} + /> +
+ {widgetType === WidgetType.PRESET && ( + + setInnerFormState({ formValue, isValid }) + } + > + )} + {widgetType == WidgetType.GENERICS_METRICS && ( + <> + + setInnerFormState({ formValue, isValid }) + } + > + + )} + {widgetType === WidgetType.METRIC && ( + + setInnerFormState({ formValue, isValid }) + } + > + )} + +
+
+ ); +}; + +export default WidgetModal; diff --git a/keep-ui/app/(keep)/dashboard/[id]/dashboard.tsx b/keep-ui/app/(keep)/dashboard/[id]/dashboard.tsx new file mode 100644 index 0000000000..9f6c482ab9 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/[id]/dashboard.tsx @@ -0,0 +1,241 @@ +"use client"; +import { useParams } from "next/navigation"; +import { ChangeEvent, useEffect, useState } from "react"; +import GridLayout from "../GridLayout"; +import WidgetModal from "../WidgetModal"; +import { Button, Card, Icon, Subtitle, TextInput } from "@tremor/react"; +import { + GenericsMetrics, + LayoutItem, + Threshold, + WidgetData, + WidgetType, +} from "../types"; +import { FiEdit2, FiSave } from "react-icons/fi"; +import { useDashboards } from "utils/hooks/useDashboards"; +import { toast } from "react-toastify"; +import { GenericFilters } from "@/components/filters/GenericFilters"; +import { useDashboardPreset } from "utils/hooks/useDashboardPresets"; +import { + MetricsWidget, + useDashboardMetricWidgets, +} from "@/utils/hooks/useDashboardMetricWidgets"; +import { useApi } from "@/shared/lib/hooks/useApi"; +import { showErrorToast } from "@/shared/ui"; +import "../styles.css"; +import { Preset } from "@/entities/presets/model/types"; + +const DASHBOARD_FILTERS = [ + { + type: "date", + key: "time_stamp", + value: "", + name: "Last received", + }, +]; + +const DashboardPage = () => { + const api = useApi(); + const allPresets = useDashboardPreset(); + const { id }: any = useParams(); + const { dashboards, isLoading, mutate: mutateDashboard } = useDashboards(); + const [isModalOpen, setIsModalOpen] = useState(false); + const [layout, setLayout] = useState([]); + const [widgetData, setWidgetData] = useState([]); + const { widgets: allMetricWidgets } = useDashboardMetricWidgets(true); + const [editingItem, setEditingItem] = useState(null); + const [dashboardName, setDashboardName] = useState(decodeURIComponent(id)); + const [isEditingName, setIsEditingName] = 
useState(false); + + useEffect(() => { + if (!isLoading) { + const dashboard = dashboards?.find( + (d) => d.dashboard_name === decodeURIComponent(id) + ); + if (dashboard) { + setLayout(dashboard.dashboard_config.layout); + setWidgetData(dashboard.dashboard_config.widget_data); + setDashboardName(dashboard.dashboard_name); + } + } + }, [id, dashboards, isLoading]); + + const openModal = () => { + setEditingItem(null); // Ensure new modal opens without editing item context + setIsModalOpen(true); + }; + const closeModal = () => setIsModalOpen(false); + + const handleAddWidget = (widget: any) => { + const uniqueId = `w-${Date.now()}`; + const newItem: LayoutItem = { + i: uniqueId, + x: 0, + y: 0, + w: 3, + h: 3, + minW: 2, + minH: 3, + static: false, + }; + const newWidget: WidgetData = { + ...newItem, + ...widget, + }; + setLayout((prevLayout) => [...prevLayout, newWidget]); + setWidgetData((prevData) => [...prevData, newWidget]); + }; + + const handleEditWidget = (id: string, update?: WidgetData) => { + let itemToEdit = widgetData.find((d) => d.i === id) || null; + if (itemToEdit && update) { + setEditingItem({ ...itemToEdit, ...update }); + } else { + setEditingItem(itemToEdit); + } + setIsModalOpen(true); + }; + + const handleSaveEdit = (updatedItem: WidgetData) => { + setWidgetData((prevData) => + prevData.map((item) => (item.i === updatedItem.i ? updatedItem : item)) + ); + closeModal(); + }; + + const handleDeleteWidget = (id: string) => { + setLayout(layout.filter((item) => item.i !== id)); + setWidgetData(widgetData.filter((item) => item.i !== id)); + }; + + const handleLayoutChange = (newLayout: LayoutItem[]) => { + setLayout(newLayout); + setWidgetData((prevData) => + prevData.map((item) => { + const newItem = newLayout.find((l) => l.i === item.i); + return newItem ? 
{ ...item, ...newItem } : item; + }) + ); + }; + + const handleSaveDashboard = async () => { + try { + let dashboard = dashboards?.find( + (d) => d.dashboard_name === decodeURIComponent(id) + ); + const method = dashboard ? "PUT" : "POST"; + const endpoint = `/dashboard${ + dashboard ? `/${encodeURIComponent(dashboard.id)}` : "" + }`; + + const result = await api.post( + endpoint, + { + dashboard_name: dashboardName, + dashboard_config: { + layout, + widget_data: widgetData, + }, + }, + { + method, + } + ); + + console.log("Dashboard saved successfully", result); + mutateDashboard(); + toast.success("Dashboard saved successfully"); + } catch (error) { + showErrorToast(error, "Failed to save dashboard"); + } + }; + + const toggleEditingName = () => { + setIsEditingName(!isEditingName); + }; + + const handleNameChange = (e: ChangeEvent) => { + setDashboardName(e.target.value); + }; + + return ( +
+
+
+ {isEditingName ? ( + + ) : ( + + {dashboardName} + + )} + +
+
+ +
+ +
+
+
+ {layout.length === 0 ? ( + +
+

No widgets available

+

Click to add your first widget

+
+
+ ) : ( + + + + )} + {isModalOpen && ( + + )} +
+ ); +}; + +export default DashboardPage; diff --git a/keep-ui/app/(keep)/dashboard/[id]/page.tsx b/keep-ui/app/(keep)/dashboard/[id]/page.tsx new file mode 100644 index 0000000000..ac83702608 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/[id]/page.tsx @@ -0,0 +1,10 @@ +import DashboardPage from "./dashboard"; + +export default function Page() { + return ; +} + +export const metadata = { + title: "Keep - Dashboards", + description: "Single pane of glass for all your alerts.", +}; diff --git a/keep-ui/app/(keep)/dashboard/alert-quality-table.tsx b/keep-ui/app/(keep)/dashboard/alert-quality-table.tsx new file mode 100644 index 0000000000..7730b2fca2 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/alert-quality-table.tsx @@ -0,0 +1,377 @@ +"use client"; // Add this line at the top to make this a Client Component + +import React, { + useState, + useEffect, + Dispatch, + SetStateAction, + useMemo, +} from "react"; +import { GenericTable } from "@/components/table/GenericTable"; +import { useAlertQualityMetrics } from "@/utils/hooks/useAlertQuality"; +import { useProviders } from "@/utils/hooks/useProviders"; +import { Provider, ProvidersResponse } from "@/shared/api/providers"; +import { TabGroup, TabList, Tab, Callout } from "@tremor/react"; +import { GenericFilters } from "@/components/filters/GenericFilters"; +import { useSearchParams } from "next/navigation"; +import { AlertKnownKeys } from "@/entities/alerts/model"; +import { createColumnHelper, DisplayColumnDef } from "@tanstack/react-table"; +import { ExclamationCircleIcon } from "@heroicons/react/20/solid"; + +const tabs = [ + { name: "All", value: "all" }, + { name: "Installed", value: "installed" }, + { name: "Linked", value: "linked" }, +]; + +const ALERT_QUALITY_FILTERS = [ + { + type: "date", + key: "time_stamp", + value: "", + name: "Last received", + }, +]; + +const FilterTabs = ({ + tabs, + setTab, + tab, +}: { + tabs: { name: string; value: string }[]; + setTab: Dispatch>; + tab: number; +}) => { + 
return ( +
+ { + setTab(index); + }} + > + + {tabs.map((tabItem) => ( + {tabItem.name} + ))} + + +
+ ); +}; + +interface AlertMetricQuality { + alertsReceived: number; + alertsCorrelatedToIncidentsPercentage: number; + alertsWithSeverityPercentage: number; + [key: string]: number; +} + +type FinalAlertQuality = Provider & + AlertMetricQuality & { provider_display_name: string }; +interface Pagination { + limit: number; + offset: number; +} + +const QualityTable = ({ + providersMeta, + alertsQualityMetrics, + isDashBoard, + setFields, + fieldsValue, +}: { + providersMeta: ProvidersResponse | undefined; + alertsQualityMetrics: Record> | undefined; + isDashBoard?: boolean; + setFields: (fields: string | string[] | Record) => void; + fieldsValue: string | string[] | Record; +}) => { + const [pagination, setPagination] = useState({ + limit: 10, + offset: 0, + }); + const customFieldFilter = { + type: "select", + key: "fields", + value: isDashBoard ? fieldsValue : "", + name: "Field", + options: AlertKnownKeys.map((key) => ({ value: key, label: key })), + // only_one: true, + searchParamsNotNeed: isDashBoard, + can_select: 3, + setFilter: setFields, + }; + const searchParams = useSearchParams(); + const entries = searchParams ? 
Array.from(searchParams.entries()) : []; + const columnHelper = createColumnHelper(); + + const params = entries.reduce( + (acc, [key, value]) => { + if (key in acc) { + if (Array.isArray(acc[key])) { + acc[key] = [...acc[key], value]; + return acc; + } else { + acc[key] = [acc[key] as string, value]; + } + return acc; + } + acc[key] = value; + return acc; + }, + {} as Record + ); + function toArray(value: string | string[]) { + if (!value) { + return []; + } + + if (!Array.isArray(value) && value) { + return [value]; + } + + return value; + } + const fields = toArray( + params?.["fields"] || (fieldsValue as string | string[]) || [] + ) as string[]; + const [tab, setTab] = useState(0); + + const handlePaginationChange = (newLimit: number, newOffset: number) => { + setPagination({ limit: newLimit, offset: newOffset }); + }; + + useEffect(() => { + handlePaginationChange(10, 0); + }, [tab, searchParams?.toString()]); + + // Construct columns based on the fields selected + const columns = useMemo(() => { + const baseColumns = [ + columnHelper.display({ + id: "provider_display_name", + header: "Provider Name", + cell: ({ row }) => { + const displayName = row.original.provider_display_name; + return ( +
+
{displayName}
+
id: {row.original.id}
+
type: {row.original.type}
+
+ ); + }, + }), + columnHelper.accessor("alertsReceived", { + id: "alertsReceived", + header: "Alerts Received", + }), + columnHelper.display({ + id: "alertsCorrelatedToIncidentsPercentage", + header: "% of Alerts Correlated to Incidents", + cell: ({ row }) => { + return `${row.original.alertsCorrelatedToIncidentsPercentage.toFixed( + 2 + )}%`; + }, + }), + ] as DisplayColumnDef[]; + + // Add dynamic columns based on the fields + const dynamicColumns = fields.map((field: string) => + columnHelper.accessor( + `alertsWith${field.charAt(0).toUpperCase() + field.slice(1)}Percentage`, + { + id: `alertsWith${ + field.charAt(0).toUpperCase() + field.slice(1) + }Percentage`, + header: `% of Alerts Having ${ + field.charAt(0).toUpperCase() + field.slice(1) + }`, + cell: (info: any) => `${info.getValue().toFixed(2)}%`, + } + ) + ) as DisplayColumnDef[]; + + return [ + ...baseColumns, + ...dynamicColumns, + ] as DisplayColumnDef[]; + }, [fields]); + + // Process data and include dynamic fields + const finalData = useMemo(() => { + let providers: Provider[] | null = null; + + if (!providersMeta || !alertsQualityMetrics) { + return null; + } + + switch (tab) { + case 0: + providers = [ + ...providersMeta?.installed_providers, + ...providersMeta?.linked_providers, + ]; + break; + case 1: + providers = providersMeta?.installed_providers || []; + break; + case 2: + providers = providersMeta?.linked_providers || []; + break; + default: + providers = [ + ...providersMeta?.installed_providers, + ...providersMeta?.linked_providers, + ]; + break; + } + + if (!providers) { + return null; + } + + function getProviderDisplayName(provider: Provider) { + return ( + (provider?.details?.name + ? 
`${provider.details.name} (${provider.display_name})` + : provider.display_name) || provider.type + ); + } + + const innerData: FinalAlertQuality[] = providers.map((provider) => { + const providerId = provider.id; + const providerType = provider.type; + const key = `${providerId}_${providerType}`; + const alertQuality = alertsQualityMetrics[key]; + const totalAlertsReceived = alertQuality?.total_alerts ?? 0; + const correlated_alerts = alertQuality?.correlated_alerts ?? 0; + const correltedPert = + totalAlertsReceived && correlated_alerts + ? (correlated_alerts / totalAlertsReceived) * 100 + : 0; + const severityPert = totalAlertsReceived + ? ((alertQuality?.severity_count ?? 0) / totalAlertsReceived) * 100 + : 0; + + // Calculate percentages for dynamic fields + const dynamicFieldPercentages = fields.reduce( + (acc, field: string) => { + acc[ + `alertsWith${ + field.charAt(0).toUpperCase() + field.slice(1) + }Percentage` + ] = totalAlertsReceived + ? ((alertQuality?.[`${field}_count`] ?? 0) / totalAlertsReceived) * + 100 + : 0; + return acc; + }, + {} as Record + ); + + return { + ...provider, + alertsReceived: totalAlertsReceived, + alertsCorrelatedToIncidentsPercentage: correltedPert, + alertsWithSeverityPercentage: severityPert, + ...dynamicFieldPercentages, // Add dynamic field percentages here + provider_display_name: getProviderDisplayName(provider), + } as FinalAlertQuality; + }); + + return innerData; + }, [tab, providersMeta, alertsQualityMetrics, fields]); + + return ( +
+
+ {!isDashBoard && ( +

+ Alert Quality Dashboard +

+ )} +
+
+ +
+ +
+
+ {finalData && ( + + data={finalData} + columns={columns} + rowCount={finalData?.length} + offset={pagination.offset} + limit={pagination.limit} + onPaginationChange={handlePaginationChange} + dataFetchedAtOneGO={true} + onRowClick={(row) => { + console.log("Row clicked:", row); + }} + /> + )} +
+ ); +}; + +const AlertQuality = ({ + isDashBoard, + filters, + setFilters, +}: { + isDashBoard?: boolean; + filters: { + [x: string]: string | string[]; + }; + setFilters: any; +}) => { + const fieldsValue = filters?.fields || ""; + const { data: providersMeta } = useProviders(); + const { data: alertsQualityMetrics, error } = useAlertQualityMetrics( + isDashBoard ? (fieldsValue as string | string[]) : "" + ); + + if (error) { + return ( + + Failed to load Alert Quality Metrics + + ); + } + + return ( + { + setFilters((filters: any) => { + return { + ...filters, + fields: field, + }; + }); + }} + fieldsValue={fieldsValue} + /> + ); +}; + +export default AlertQuality; diff --git a/keep-ui/app/dashboard/styles.css b/keep-ui/app/(keep)/dashboard/styles.css similarity index 99% rename from keep-ui/app/dashboard/styles.css rename to keep-ui/app/(keep)/dashboard/styles.css index f3e0c0e55d..dc1dbc1264 100644 --- a/keep-ui/app/dashboard/styles.css +++ b/keep-ui/app/(keep)/dashboard/styles.css @@ -1,4 +1,3 @@ - .grid-item__widget:hover { cursor: move; } diff --git a/keep-ui/app/(keep)/dashboard/types.tsx b/keep-ui/app/(keep)/dashboard/types.tsx new file mode 100644 index 0000000000..e898a095c0 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/types.tsx @@ -0,0 +1,52 @@ +import { MetricsWidget } from "@/utils/hooks/useDashboardMetricWidgets"; +import { Preset } from "@/entities/presets/model/types"; + +export interface LayoutItem { + i: string; + x: number; + y: number; + w: number; + h: number; + minW?: number; + minH?: number; + static: boolean; +} + +export interface GenericsMetrics { + key: string; + label: string; + widgetType: "table" | "chart"; + meta: { + defaultFilters: { + [key: string]: string | string[]; + }; + }; +} + +export enum WidgetType { + PRESET = "PRESET", + METRIC = "METRIC", + GENERICS_METRICS = "GENERICS_METRICS", +} + +export enum PresetPanelType { + ALERT_TABLE = "ALERT_TABLE", + ALERT_COUNT_PANEL = "ALERT_COUNT_PANEL", +} + +export interface 
WidgetData extends LayoutItem { + thresholds?: Threshold[]; + preset?: Preset; + name: string; + widgetType: WidgetType; + genericMetrics?: GenericsMetrics; + metric?: MetricsWidget; + presetPanelType?: PresetPanelType; + showFiringOnly?: boolean; + customLink?: string; +} + +export interface Threshold { + value: number; + color: string; +} diff --git a/keep-ui/app/(keep)/dashboard/widget-types/generic-metrics/generic-metrics-grid-item.tsx b/keep-ui/app/(keep)/dashboard/widget-types/generic-metrics/generic-metrics-grid-item.tsx new file mode 100644 index 0000000000..19c27737d1 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/widget-types/generic-metrics/generic-metrics-grid-item.tsx @@ -0,0 +1,57 @@ +import React, { useEffect, useState } from "react"; +import { WidgetData } from "../../types"; +import AlertQuality from "@/app/(keep)/dashboard/alert-quality-table"; + +interface GridItemProps { + item: WidgetData; + onEdit: (updatedItem: WidgetData) => void; +} + +const GenericMetricsGridItem: React.FC = ({ item, onEdit }) => { + const [filters, setFilters] = useState({ + ...(item?.genericMetrics?.meta?.defaultFilters || {}), + }); + + useEffect(() => { + let meta; + + if (item?.genericMetrics?.meta) { + meta = { + ...item.genericMetrics.meta, + defaultFilters: filters || {}, + }; + } + + const updatedItem = { + ...item, + genericMetrics: { + ...item.genericMetrics, + meta, + }, + }; + + onEdit(updatedItem as WidgetData); + }, [filters]); + + function renderGenericMetrics() { + switch (item?.genericMetrics?.key) { + case "alert_quality": + return ( + + ); + + default: + return null; + } + } + + return ( +
{renderGenericMetrics()}
+ ); +}; + +export default GenericMetricsGridItem; diff --git a/keep-ui/app/(keep)/dashboard/widget-types/generic-metrics/generic-metrics-widget-form.tsx b/keep-ui/app/(keep)/dashboard/widget-types/generic-metrics/generic-metrics-widget-form.tsx new file mode 100644 index 0000000000..4689c7fc8d --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/widget-types/generic-metrics/generic-metrics-widget-form.tsx @@ -0,0 +1,96 @@ +import { Select, SelectItem, Subtitle } from "@tremor/react"; +import { useEffect } from "react"; +import { Controller, get, useForm, useWatch } from "react-hook-form"; +import { GenericsMetrics, LayoutItem } from "../../types"; + +const GENERIC_METRICS = [ + { + key: "alert_quality", + label: "Alert Quality", + widgetType: "table", + meta: { + defaultFilters: { fields: "severity" }, + }, + }, +] as GenericsMetrics[]; + +interface GenericMetricsForm { + selectedGenericMetrics: string; +} + +export interface GenericMetricsWidgetFormProps { + editingItem?: any; + onChange: (formState: any, isValid: boolean) => void; +} + +export const GenericMetricsWidgetForm: React.FC< + GenericMetricsWidgetFormProps +> = ({ editingItem, onChange }) => { + const { + control, + formState: { errors, isValid }, + } = useForm({ + defaultValues: { + selectedGenericMetrics: editingItem?.genericMetrics?.key ?? "", + }, + }); + const formValues = useWatch({ control }); + + const deepClone = (obj: GenericsMetrics | undefined) => { + if (!obj) { + return obj; + } + return JSON.parse(JSON.stringify(obj)) as GenericsMetrics; + }; + + function getLayoutValues(): LayoutItem { + if (editingItem) { + return {} as LayoutItem; + } + + return { + w: 12, + h: 20, + minW: 10, + minH: 15, + static: false, + } as LayoutItem; + } + + useEffect(() => { + const genericMetrics = deepClone( + GENERIC_METRICS.find((g) => g.key === formValues.selectedGenericMetrics) + ); + onChange({ ...getLayoutValues(), genericMetrics }, true); + }, [formValues]); + + return ( +
+ Generic Metrics + ( + + )} + /> +
+ ); +}; diff --git a/keep-ui/app/(keep)/dashboard/widget-types/metric/metric-grid-item.tsx b/keep-ui/app/(keep)/dashboard/widget-types/metric/metric-grid-item.tsx new file mode 100644 index 0000000000..0aeb652760 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/widget-types/metric/metric-grid-item.tsx @@ -0,0 +1,36 @@ +import React from "react"; +import { AreaChart } from "@tremor/react"; +import { WidgetData } from "../../types"; + +interface GridItemProps { + item: WidgetData; +} + +const GridItem: React.FC = ({ item }) => { + return ( +
+
+ + `${Intl.NumberFormat().format(number).toString()}` + } + startEndOnly + connectNulls + showLegend={false} + showTooltip={true} + xAxisLabel="Timestamp" + /> +
+
+ ); +}; + +export default GridItem; diff --git a/keep-ui/app/(keep)/dashboard/widget-types/metric/metric-widget-form.tsx b/keep-ui/app/(keep)/dashboard/widget-types/metric/metric-widget-form.tsx new file mode 100644 index 0000000000..d105c21246 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/widget-types/metric/metric-widget-form.tsx @@ -0,0 +1,76 @@ +import { Select, SelectItem, Subtitle } from "@tremor/react"; +import { useEffect } from "react"; +import { Controller, get, useForm, useWatch } from "react-hook-form"; +import { MetricsWidget } from "@/utils/hooks/useDashboardMetricWidgets"; +import { LayoutItem } from "../../types"; + +interface PresetForm { + selectedMetricWidget: string; +} + +export interface MetricWidgetFormProps { + metricWidgets: MetricsWidget[]; + editingItem?: any; + onChange: (formState: any, isValid: boolean) => void; +} + +export const MetricWidgetForm: React.FC = ({ + metricWidgets, + editingItem, + onChange, +}) => { + const { + control, + formState: { errors, isValid }, + } = useForm({ + defaultValues: { + selectedMetricWidget: editingItem?.metric?.id ?? "", + }, + }); + const formValues = useWatch({ control }); + + useEffect(() => { + const metric = metricWidgets.find( + (p) => p.id === formValues.selectedMetricWidget + ); + onChange({ ...getLayoutValues(), metric }, isValid); + }, [formValues]); + + function getLayoutValues(): LayoutItem { + if (editingItem) { + return {} as LayoutItem; + } + + return { + w: 6, + h: 8, + minW: 2, + minH: 7, + static: false, + } as LayoutItem; + } + + return ( +
+ Widget + ( + + )} + /> +
+ ); +}; diff --git a/keep-ui/app/(keep)/dashboard/widget-types/preset/columns-selection.tsx b/keep-ui/app/(keep)/dashboard/widget-types/preset/columns-selection.tsx new file mode 100644 index 0000000000..a6cd94123a --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/widget-types/preset/columns-selection.tsx @@ -0,0 +1,52 @@ +import { useFacetPotentialFields } from "@/features/filter/hooks"; +import { MultiSelect, MultiSelectItem } from "@tremor/react"; +import React, { useEffect, useMemo, useState } from "react"; +import { defaultColumns } from "./constants"; + +interface ColumnsSelectionProps { + selectedColumns?: string[]; + onChange: (selected: string[]) => void; +} + +const ColumnsSelection: React.FC = ({ + selectedColumns, + onChange, +}) => { + const [selectedColumnsState, setSelectedColumnsState] = useState>( + new Set(selectedColumns || defaultColumns) + ); + const { data } = useFacetPotentialFields("alerts"); + + useEffect( + () => onChange(Array.from(selectedColumnsState)), + [selectedColumnsState] + ); + + const sortedOptions = useMemo(() => { + return data?.slice().sort((first, second) => { + const inSetA = selectedColumnsState.has(first); + const inSetB = selectedColumnsState.has(second); + + if (inSetA && !inSetB) return -1; + if (!inSetA && inSetB) return 1; + + return first.localeCompare(second); + }); + }, [data, selectedColumnsState]); + + return ( + setSelectedColumnsState(new Set(selected))} + > + {sortedOptions?.map((field) => ( + + {field} + + ))} + + ); +}; + +export default ColumnsSelection; diff --git a/keep-ui/app/(keep)/dashboard/widget-types/preset/constants.ts b/keep-ui/app/(keep)/dashboard/widget-types/preset/constants.ts new file mode 100644 index 0000000000..0d5b6274a1 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/widget-types/preset/constants.ts @@ -0,0 +1,8 @@ +export const defaultColumns = [ + "severity", + "status", + "source", + "name", + "description", + "lastReceived", +]; diff --git 
a/keep-ui/app/(keep)/dashboard/widget-types/preset/preset-grid-item.tsx b/keep-ui/app/(keep)/dashboard/widget-types/preset/preset-grid-item.tsx new file mode 100644 index 0000000000..8cc8b3a971 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/widget-types/preset/preset-grid-item.tsx @@ -0,0 +1,231 @@ +import React, { useMemo } from "react"; +import { WidgetData, WidgetType, PresetPanelType } from "../../types"; +import { usePresetAlertsCount } from "@/features/presets/custom-preset-links"; +import { useDashboardPreset } from "@/utils/hooks/useDashboardPresets"; +import { Button, Icon } from "@tremor/react"; +import { FireIcon } from "@heroicons/react/24/outline"; +import * as Tooltip from "@radix-ui/react-tooltip"; +import Skeleton from "react-loading-skeleton"; +import "react-loading-skeleton/dist/skeleton.css"; +import { useRouter } from "next/navigation"; +import TimeAgo from "react-timeago"; +import { useSearchParams } from "next/navigation"; +import WidgetAlertsTable from "./widget-alerts-table"; +import WidgetAlertCountPanel from "./widget-alert-count-panel"; +import CelInput from "@/features/cel-input/cel-input"; + +interface GridItemProps { + item: WidgetData; +} + +const PresetGridItem: React.FC = ({ item }) => { + const searchParams = useSearchParams(); + const timeRangeCel = useMemo(() => { + const timeRangeSearchParam = searchParams.get("time_stamp"); + if (timeRangeSearchParam) { + const parsedTimeRange = JSON.parse(timeRangeSearchParam); + return `lastReceived >= "${parsedTimeRange.start}" && lastReceived <= "${parsedTimeRange.end}"`; + } + return ""; + }, [searchParams]); + const presets = useDashboardPreset(); + const countOfLastAlerts = (item.preset as any).countOfLastAlerts; + const preset = useMemo( + () => presets.find((preset) => preset.id === item.preset?.id), + [presets, item.preset?.id] + ); + const presetCel = useMemo( + () => preset?.options.find((option) => option.label === "CEL")?.value || "", + [preset] + ); + const filterCel = useMemo( 
+ () => [timeRangeCel, presetCel].filter(Boolean).join(" && "), + [presetCel, timeRangeCel] + ); + + const { + alerts, + totalCount: presetAlertsCount, + isLoading, + } = usePresetAlertsCount( + filterCel, + !!preset?.counter_shows_firing_only, + countOfLastAlerts, + 0, + 10000 // refresh interval + ); + const router = useRouter(); + + function handleGoToPresetClick() { + router.push(`/alerts/${preset?.name.toLowerCase()}`); + } + + const getColor = () => { + let color = "#000000"; + if ( + item.widgetType === WidgetType.PRESET && + item.thresholds && + item.preset + ) { + for (let i = item.thresholds.length - 1; i >= 0; i--) { + if (item.preset && presetAlertsCount >= item.thresholds[i].value) { + color = item.thresholds[i].color; + break; + } + } + } + + return color; + }; + + function hexToRgb(hex: string, alpha: number = 1) { + // Remove '#' if present + hex = hex.replace(/^#/, ""); + + // Handle shorthand form (#f44 → #ff4444) + if (hex.length === 3) { + hex = hex + .split("") + .map((c) => c + c) + .join(""); + } + + const bigint = parseInt(hex, 16); + const r = (bigint >> 16) & 255; + const g = (bigint >> 8) & 255; + const b = bigint & 255; + + return `rgb(${r}, ${g}, ${b}, ${alpha})`; + } + + function renderCEL() { + if (!presetCel) { + return; + } + + return ( +
+
Preset CEL:
+ + + + + + + +
+ {presetCel} +
+ +
+
+
+
+
+ ); + } + + function renderAlertsCountText() { + const label = preset?.counter_shows_firing_only + ? "Firing alerts count:" + : "Alerts count:"; + let state: string = "nothingToShow"; + + if (countOfLastAlerts > 0) { + if (presetAlertsCount <= countOfLastAlerts) { + state = "allAlertsShown"; + } else { + state = "someAlertsShown"; + } + } + + return ( +
+
{label}
+
+ {isLoading && ( + + )} + {!isLoading && ( + <> + {state === "nothingToShow" && ( + {presetAlertsCount} alerts + )} + {state === "allAlertsShown" && ( + showing {presetAlertsCount} alerts + )} + {state === "someAlertsShown" && ( + + showing {countOfLastAlerts} out of {presetAlertsCount} + + )} + + {preset?.counter_shows_firing_only && ( + + )} + + )} +
+
+ ); + } + + const isAlertTable = item.presetPanelType === PresetPanelType.ALERT_TABLE || !item.presetPanelType; + const isAlertCountPanel = item.presetPanelType === PresetPanelType.ALERT_COUNT_PANEL; + + return ( +
+ {isAlertTable && ( + <> +
+
+
+
Preset name:
+
{preset?.name}
+
+ {renderCEL()} + {renderAlertsCountText()} +
+
+ +
+
+ {countOfLastAlerts > 0 && ( + + )} + + )} + {isAlertCountPanel && ( + + )} +
+ ); +}; + +export default PresetGridItem; diff --git a/keep-ui/app/(keep)/dashboard/widget-types/preset/preset-widget-form.tsx b/keep-ui/app/(keep)/dashboard/widget-types/preset/preset-widget-form.tsx new file mode 100644 index 0000000000..984934fb38 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/widget-types/preset/preset-widget-form.tsx @@ -0,0 +1,324 @@ +import { Trashcan } from "@/components/icons"; +import { Preset } from "@/entities/presets/model"; +import { + Button, + Icon, + Select, + SelectItem, + Subtitle, + TextInput, + Switch, +} from "@tremor/react"; +import { useEffect, useMemo, useState } from "react"; +import { + Controller, + get, + useForm, + useWatch, + useFieldArray, +} from "react-hook-form"; +import { LayoutItem, Threshold, PresetPanelType } from "../../types"; +import ColumnsSelection from "./columns-selection"; + +interface PresetForm { + selectedPreset: string; + countOfLastAlerts: string; + thresholds: Threshold[]; + presetPanelType: PresetPanelType; + showFiringOnly: boolean; + customLink?: string; +} + +export interface PresetWidgetFormProps { + editingItem?: any; + presets: Preset[]; + onChange: (formState: any, isValid: boolean) => void; +} + +export const PresetWidgetForm: React.FC = ({ + editingItem, + presets, + onChange, +}: PresetWidgetFormProps) => { + const { + control, + formState: { errors, isValid }, + register, + } = useForm({ + defaultValues: { + selectedPreset: editingItem?.preset?.id, + countOfLastAlerts: editingItem + ? editingItem.preset.countOfLastAlerts || 0 + : 5, + thresholds: editingItem?.thresholds || [ + { value: 0, color: "#10b981" }, // Bold emerald green + { value: 20, color: "#dc2626" }, // Bold red + ], + presetPanelType: editingItem?.presetPanelType || PresetPanelType.ALERT_TABLE, + showFiringOnly: editingItem?.showFiringOnly ?? false, + customLink: editingItem?.customLink || "", + }, + }); + const [presetColumns, setPresetColumns] = useState( + editingItem ? 
editingItem.presetColumns : undefined + ); + + const { fields, append, remove, move, replace } = useFieldArray({ + control, + name: "thresholds", + }); + + const formValues = useWatch({ control }); + + const normalizedFormValues = useMemo(() => { + return { + countOfLastAlerts: parseInt(formValues.countOfLastAlerts || "0"), + selectedPreset: presets.find((p) => p.id === formValues.selectedPreset), + presetColumns, + thresholds: formValues.thresholds?.map((t) => ({ + ...t, + value: parseInt(t.value?.toString() as string, 10) || 0, + })), + presetPanelType: formValues.presetPanelType || PresetPanelType.ALERT_TABLE, + showFiringOnly: formValues.showFiringOnly ?? false, + customLink: formValues.customLink || "", + }; + }, [formValues, presetColumns]); + + function getLayoutValues(): LayoutItem { + if (editingItem) { + return {} as LayoutItem; + } + + const isAlertTable = normalizedFormValues.presetPanelType === PresetPanelType.ALERT_TABLE; + const isAlertCountPanel = normalizedFormValues.presetPanelType === PresetPanelType.ALERT_COUNT_PANEL; + + if (isAlertCountPanel) { + // Narrower, more compact layout for count panels with no minimum width + return { + w: 4, + h: 3, + minW: 0, + minH: 2, + static: false, + } as LayoutItem; + } + + // Original layout for alert tables + const itemHeight = isAlertTable && normalizedFormValues.countOfLastAlerts > 0 ? 6 : 4; + const itemWidth = isAlertTable && normalizedFormValues.countOfLastAlerts > 0 ? 
8 : 6; + + return { + w: itemWidth, + h: itemHeight, + minW: 4, + minH: 4, + static: false, + } as LayoutItem; + } + + useEffect(() => { + onChange( + { + ...getLayoutValues(), + preset: { + ...normalizedFormValues.selectedPreset, + countOfLastAlerts: normalizedFormValues.countOfLastAlerts, + }, + presetColumns: normalizedFormValues.presetColumns, + thresholds: normalizedFormValues.thresholds, + presetPanelType: normalizedFormValues.presetPanelType, + showFiringOnly: normalizedFormValues.showFiringOnly, + customLink: normalizedFormValues.customLink, + }, + isValid + ); + }, [normalizedFormValues, isValid]); + + const handleThresholdBlur = () => { + const reorderedThreesholds = formValues?.thresholds + ?.map((t) => ({ + ...t, + value: parseInt(t.value?.toString() as string, 10) || 0, + })) + .sort((a, b) => a.value - b.value); + if (!reorderedThreesholds) { + return; + } + replace(reorderedThreesholds as any); + }; + + const handleAddThreshold = () => { + const maxThreshold = Math.max( + ...(formValues.thresholds?.map((t) => t.value) as any), + 0 + ); + append({ value: maxThreshold + 10, color: "#000000" }); + }; + + return ( + <> +
+ Preset + ( + + )} + /> +
+
+ Panel Type + ( + + )} + /> +
+ {formValues.presetPanelType === PresetPanelType.ALERT_COUNT_PANEL && ( + <> +
+
+ Show Firing Alerts Only + ( + + )} + /> +
+
+
+ Custom Link (optional) + ( + + )} + /> +
+ + )} + {formValues.presetPanelType === PresetPanelType.ALERT_TABLE && ( + <> +
+ Last alerts count to display + ( + + )} + /> +
+ setPresetColumns(selectedColumns)} + > + + )} +
+
+ Thresholds + +
+
+ {fields.map((field, index) => ( +
+ + + {fields.length > 1 && ( + + )} +
+ ))} +
+
+ + ); +}; diff --git a/keep-ui/app/(keep)/dashboard/widget-types/preset/widget-alert-count-panel.tsx b/keep-ui/app/(keep)/dashboard/widget-types/preset/widget-alert-count-panel.tsx new file mode 100644 index 0000000000..f67ff244ee --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/widget-types/preset/widget-alert-count-panel.tsx @@ -0,0 +1,205 @@ +import React, { useMemo } from "react"; +import { WidgetData, WidgetType, Threshold } from "../../types"; +import { usePresetAlertsCount } from "@/features/presets/custom-preset-links"; +import { useDashboardPreset } from "@/utils/hooks/useDashboardPresets"; +import { Button, Icon } from "@tremor/react"; +import { FireIcon } from "@heroicons/react/24/outline"; +import Skeleton from "react-loading-skeleton"; +import "react-loading-skeleton/dist/skeleton.css"; +import { useRouter } from "next/navigation"; +import { useSearchParams } from "next/navigation"; + +interface WidgetAlertCountPanelProps { + presetName: string; + showFiringOnly?: boolean; + background?: string; + thresholds?: Threshold[]; + customLink?: string; +} + +const WidgetAlertCountPanel: React.FC = ({ + presetName, + showFiringOnly = false, + background, + thresholds = [], + customLink, +}) => { + const searchParams = useSearchParams(); + const timeRangeCel = useMemo(() => { + const timeRangeSearchParam = searchParams.get("time_stamp"); + if (timeRangeSearchParam) { + const parsedTimeRange = JSON.parse(timeRangeSearchParam); + return `lastReceived >= "${parsedTimeRange.start}" && lastReceived <= "${parsedTimeRange.end}"`; + } + return ""; + }, [searchParams]); + + const presets = useDashboardPreset(); + const preset = useMemo( + () => presets.find((preset) => preset.name === presetName), + [presets, presetName] + ); + + const presetCel = useMemo( + () => preset?.options.find((option) => option.label === "CEL")?.value || "", + [preset] + ); + + const filterCel = useMemo( + () => [timeRangeCel, presetCel].filter(Boolean).join(" && "), + [presetCel, 
timeRangeCel] + ); + + // Get total alerts count + const { + totalCount: totalAlertsCount, + isLoading: isLoadingTotal, + } = usePresetAlertsCount( + filterCel, + false, // Always get total count + 0, + 0, + 10000 + ); + + // Get firing alerts count + const { + totalCount: firingAlertsCount, + isLoading: isLoadingFiring, + } = usePresetAlertsCount( + filterCel, + true, // Get firing count + 0, + 0, + 10000 + ); + + const isLoading = isLoadingTotal || isLoadingFiring; + + const router = useRouter(); + + function handleGoToPresetClick() { + router.push(`/alerts/${preset?.name.toLowerCase()}`); + } + + function handleCustomLinkClick() { + if (customLink) { + window.open(customLink, '_blank'); + } + } + + const getColor = (count: number) => { + let color = "#1f2937"; // Default dark gray instead of black + if (thresholds && thresholds.length > 0) { + for (let i = thresholds.length - 1; i >= 0; i--) { + if (count >= thresholds[i].value) { + color = thresholds[i].color; + break; + } + } + } + return color; + }; + + function hexToRgb(hex: string, alpha: number = 1) { + // Remove '#' if present + hex = hex.replace(/^#/, ""); + + // Handle shorthand form (#f44 → #ff4444) + if (hex.length === 3) { + hex = hex + .split("") + .map((c) => c + c) + .join(""); + } + + const bigint = parseInt(hex, 16); + const r = (bigint >> 16) & 255; + const g = (bigint >> 8) & 255; + const b = bigint & 255; + + return `rgb(${r}, ${g}, ${b}, ${alpha})`; + } + + const label = showFiringOnly ? "Firing Alerts" : "Total Alerts"; + const displayCount = showFiringOnly ? firingAlertsCount : totalAlertsCount; + const count = isLoading ? "..." : displayCount; + + // Use firing count for threshold colors when showFiringOnly is selected + const thresholdCount = showFiringOnly ? firingAlertsCount : totalAlertsCount; + const color = getColor(thresholdCount); + + return ( +
+ {/* Header with label and button */} +
+
+ {label} + {showFiringOnly && ( + + )} +
+
+ + {customLink && ( + + )} +
+
+
+ + + {/* Main content area with diagonal alignment */} +
+ {/* Preset name and count in diagonal layout */} +
+
+ {preset?.name} +
+
+ {isLoading ? ( + + ) : ( + count + )} +
+
+
+
+
+ ); +}; + +export default WidgetAlertCountPanel; \ No newline at end of file diff --git a/keep-ui/app/(keep)/dashboard/widget-types/preset/widget-alerts-table.tsx b/keep-ui/app/(keep)/dashboard/widget-types/preset/widget-alerts-table.tsx new file mode 100644 index 0000000000..1be85701b1 --- /dev/null +++ b/keep-ui/app/(keep)/dashboard/widget-types/preset/widget-alerts-table.tsx @@ -0,0 +1,226 @@ +import React, { useEffect, useMemo } from "react"; +import { WidgetData, WidgetType } from "../../types"; +import { usePresetAlertsCount } from "@/features/presets/custom-preset-links"; +import { useDashboardPreset } from "@/utils/hooks/useDashboardPresets"; +import { Button, Icon } from "@tremor/react"; +import { FireIcon } from "@heroicons/react/24/outline"; +import { DynamicImageProviderIcon } from "@/components/ui"; +import { getStatusColor, getStatusIcon } from "@/shared/lib/status-utils"; +import { getNestedValue } from "@/shared/lib/object-utils"; +import { SeverityBorderIcon, UISeverity } from "@/shared/ui"; +import { severityMapping } from "@/entities/alerts/model"; +import * as Tooltip from "@radix-ui/react-tooltip"; +import Skeleton from "react-loading-skeleton"; +import "react-loading-skeleton/dist/skeleton.css"; +import { useRouter } from "next/navigation"; +import TimeAgo from "react-timeago"; +import { useSearchParams } from "next/navigation"; +import { useLocalStorage } from "@/utils/hooks/useLocalStorage"; +import { ColumnRenameMapping } from "@/widgets/alerts-table/ui/alert-table-column-rename"; +import { DEFAULT_COLS } from "@/widgets/alerts-table/lib/alert-table-utils"; +import { ColumnOrderState } from "@tanstack/table-core"; +import { startCase } from "lodash"; +import { defaultColumns } from "./constants"; + +interface WidgetAlertsTableProps { + presetName: string; + alerts?: any[]; + columns?: string[]; + background?: string; +} + +const WidgetAlertsTable: React.FC = ({ + presetName, + alerts, + columns, + background, +}) => { + const 
columnsGapClass = "pr-3"; + const borderClass = "border-b"; + + const [columnRenameMapping] = useLocalStorage( + `column-rename-mapping-${presetName}`, + {} + ); + + const [presetOrderedColumns] = useLocalStorage( + `column-order-${presetName}`, + DEFAULT_COLS + ); + + const columnsMeta: { [key: string]: any } = useMemo( + () => ({ + severity: { + gridColumnTemplate: "min-content", + renderHeader: () =>
, + renderValue: (alert: any) => ( + + ), + }, + status: { + gridColumnTemplate: "min-content", + renderHeader: () =>
, + renderValue: (alert: any) => ( + + ), + }, + source: { + gridColumnTemplate: "min-content", + renderHeader: () =>
, + renderValue: (alert: any) => ( + + ), + }, + name: { + gridColumnTemplate: "minmax(100px, 1fr)", + renderValue: (alert: any) => ( +
+ {alert.name} +
+ ), + }, + description: { + gridColumnTemplate: "minmax(100px, 1fr)", + renderValue: (alert: any) => ( +
+ {alert.description} +
+ ), + }, + lastReceived: { + gridColumnTemplate: "min-content", + renderValue: (alert: any) => , + }, + }), + [columnRenameMapping] + ); + + const orderedColumns = useMemo(() => { + const presetColumns: string[] = columns || defaultColumns; + const indexed: { [key: string]: number } = ( + presetOrderedColumns || defaultColumns + ).reduce((prev, curr, index) => ({ ...prev, [curr]: index }), {}); + + return presetColumns.slice().sort((firstColum, secondColumn) => { + const indexOfFirst = indexed[firstColum] || 0; + const indexOfSecond = indexed[secondColumn] || 0; + return indexOfFirst - indexOfSecond; + }); + }, [columns, presetOrderedColumns]); + + function renderHeaders() { + return orderedColumns?.map((column, index) => { + const columnMeta = columnsMeta[column]; + let columnHeaderValue; + if (columnMeta?.renderHeader) { + columnHeaderValue = columnMeta.renderHeader(); + } else { + columnHeaderValue = ( +
+ {columnRenameMapping[column] || startCase(column)} +
+ ); + } + + return ( +
+ {columnHeaderValue} +
+ ); + }); + } + + function renderTableBody() { + const alertsToRender = alerts || Array.from({ length: 5 }).fill(undefined); + + return alertsToRender + ?.map((alert, alertIndex) => { + return orderedColumns?.map((column, index) => { + const columnMeta = columnsMeta[column]; + let columnValue; + if (!alert) { + columnValue = ; + } else if (columnMeta?.renderValue) { + columnValue = columnMeta.renderValue(alert); + } else { + columnValue = ( +
{getNestedValue(alert, column)}
+ ); + } + const _columnsGapClass = + index < orderedColumns.length - 1 ? columnsGapClass : ""; + const _borderClass = + alertIndex < alertsToRender.length - 1 ? borderClass : ""; + + return ( +
+ {columnValue} +
+ ); + }); + }) + .flat(); + } + + const gridTemplateColumns = useMemo( + () => + orderedColumns + ?.map((column) => { + const columnMeta = columnsMeta[column]; + let gridColumnTemplate = "auto"; + + if (columnMeta?.gridColumnTemplate) { + gridColumnTemplate = columnMeta.gridColumnTemplate; + } else { + // Default sizing for arbitrary columns + gridColumnTemplate = "minmax(auto, 1fr)"; + } + + return gridColumnTemplate; + }) + .join(" "), + [orderedColumns, columnsMeta] + ); + + return ( +
+
+ {renderHeaders()} + {renderTableBody()} +
+
+ ); +}; + +export default WidgetAlertsTable; diff --git a/keep-ui/app/(keep)/deduplication/DeduplicationPlaceholder.tsx b/keep-ui/app/(keep)/deduplication/DeduplicationPlaceholder.tsx new file mode 100644 index 0000000000..a3cdcae672 --- /dev/null +++ b/keep-ui/app/(keep)/deduplication/DeduplicationPlaceholder.tsx @@ -0,0 +1,34 @@ +import { Card, Subtitle, Title } from "@tremor/react"; +import Link from "next/link"; +import Image from "next/image"; +import deduplicationPlaceholder from "./deduplication-placeholder.svg"; + +export const DeduplicationPlaceholder = () => { + return ( + <> + +
+ No Deduplications Yet + + Alert deduplication is the first layer of denoising. It groups + similar alerts from one source. +
To connect alerts across sources into incidents, check{" "} + + Correlations + +
+ + This page will become active once the first alerts are registered. + +
+ Deduplication +
+ + ); +}; diff --git a/keep-ui/app/(keep)/deduplication/DeduplicationSidebar.tsx b/keep-ui/app/(keep)/deduplication/DeduplicationSidebar.tsx new file mode 100644 index 0000000000..9178c2b120 --- /dev/null +++ b/keep-ui/app/(keep)/deduplication/DeduplicationSidebar.tsx @@ -0,0 +1,548 @@ +import { useEffect, useState, useMemo } from "react"; +import { Dialog } from "@headlessui/react"; +import { useForm, Controller, SubmitHandler } from "react-hook-form"; +import { + Text, + Button, + TextInput, + Callout, + Badge, + Switch, + Icon, + Title, + Card, +} from "@tremor/react"; +import { IoMdClose } from "react-icons/io"; +import { DeduplicationRule } from "@/app/(keep)/deduplication/models"; +import { useDeduplicationFields } from "utils/hooks/useDeduplicationRules"; +import { Select } from "@/shared/ui"; +import { + ExclamationTriangleIcon, + InformationCircleIcon, +} from "@heroicons/react/24/outline"; +import { KeyedMutator } from "swr"; +import { useApi } from "@/shared/lib/hooks/useApi"; +import { KeepApiError } from "@/shared/api"; +import { Providers } from "@/shared/api/providers"; +import SidePanel from "@/components/SidePanel"; +import { useConfig } from "@/utils/hooks/useConfig"; + +interface ProviderOption { + value: string; + label: string; + logoUrl: string; +} + +interface DeduplicationSidebarProps { + isOpen: boolean; + toggle: VoidFunction; + selectedDeduplicationRule: DeduplicationRule | null; + onSubmit: (data: Partial) => Promise; + mutateDeduplicationRules: KeyedMutator; + providers: { installed_providers: Providers; linked_providers: Providers }; +} + +const DeduplicationSidebar: React.FC = ({ + isOpen, + toggle, + selectedDeduplicationRule, + onSubmit, + mutateDeduplicationRules, + providers, +}) => { + const { + control, + handleSubmit, + setValue, + reset, + setError, + watch, + formState: { errors }, + clearErrors, + } = useForm>({ + defaultValues: selectedDeduplicationRule || { + name: "", + description: "", + provider_type: "", + 
provider_id: "", + fingerprint_fields: [], + full_deduplication: false, + ignore_fields: [], + }, + }); + + const [isSubmitting, setIsSubmitting] = useState(false); + + const { data: config } = useConfig(); + + const { data: deduplicationFields = {} } = useDeduplicationFields(); + const api = useApi(); + + const alertProviders = useMemo( + () => + [ + { id: null, type: "keep", details: { name: "Keep" }, tags: ["alert"] }, + ...providers.installed_providers, + ...providers.linked_providers, + ].filter((provider) => provider.tags?.includes("alert")), + [providers] + ); + const fullDeduplication = watch("full_deduplication"); + const selectedProviderType = watch("provider_type"); + const selectedProviderId = watch("provider_id"); + const fingerprintFields = watch("fingerprint_fields"); + const ignoreFields = watch("ignore_fields"); + + const availableFields = useMemo(() => { + const defaultFields = [ + "source", + "service", + "description", + "fingerprint", + "name", + "lastReceived", + ]; + if (selectedProviderType) { + const key = `${selectedProviderType}_${selectedProviderId || "null"}`; + const providerFields = deduplicationFields[key] || []; + return [ + ...new Set([ + ...defaultFields, + ...providerFields, + ...(fingerprintFields ?? []), + ...(ignoreFields ?? []), + ]), + ]; + } + return [...new Set([...defaultFields, ...(fingerprintFields ?? 
[])])]; + }, [ + selectedProviderType, + selectedProviderId, + deduplicationFields, + fingerprintFields, + ignoreFields, + ]); + + useEffect(() => { + if (isOpen && selectedDeduplicationRule) { + reset(selectedDeduplicationRule); + } else if (isOpen) { + reset({ + name: "", + description: "", + provider_type: "", + provider_id: "", + fingerprint_fields: [], + full_deduplication: false, + ignore_fields: [], + }); + } + }, [isOpen, selectedDeduplicationRule, reset]); + + const handleToggle = () => { + if (isOpen) { + clearErrors(); + } + toggle(); + }; + + const onFormSubmit: SubmitHandler> = async ( + data + ) => { + setIsSubmitting(true); + clearErrors(); + try { + let url = "/deduplications"; + + if (selectedDeduplicationRule && selectedDeduplicationRule.id) { + url += `/${selectedDeduplicationRule.id}`; + } + + const method = + !selectedDeduplicationRule || !selectedDeduplicationRule.id + ? "POST" + : "PUT"; + + const response = + method === "POST" + ? await api.post(url, data) + : await api.put(url, data); + + console.log("Deduplication rule saved:", data); + reset(); + handleToggle(); + await mutateDeduplicationRules(); + } catch (error) { + if (error instanceof KeepApiError) { + setError("root.serverError", { + type: "manual", + message: error.message || "Failed to save deduplication rule", + }); + } else { + setError("root.serverError", { + type: "manual", + message: "An unexpected error occurred", + }); + } + } finally { + setIsSubmitting(false); + } + }; + + return ( + +
+
+ + {selectedDeduplicationRule + ? `Edit ${selectedDeduplicationRule.name}` + : "Add deduplication rule"} + {selectedDeduplicationRule?.default && ( + + Default Rule + + )} + +
+
+ +
+
+ + {selectedDeduplicationRule?.default && ( +
+ + Editing a default deduplication rule requires advanced knowledge. + Default rules are carefully designed to provide optimal + deduplication for specific alert types. Modifying these rules may + impact the efficiency of your alert processing. If you're + unsure about making changes, we recommend creating a new custom rule + instead of modifying the default one. +

+ + Learn more about deduplication rules + +
+
+ )} + + {selectedDeduplicationRule?.is_provisioned && ( +
+ + + Editing a provisioned deduplication rule is not allowed. Please + contact your system administrator for more information. + + +
+ )} + +
+
+ +
+
+ + Rule name + + ( + + )} + /> +
+
+ + Description + + ( + + )} + /> +
+
+ + Provider + + + + + Select the provider for which this deduplication rule + will apply. This determines the source of alerts that + will be processed by this rule. + + + + + ( + ({ + value: fieldName, + label: fieldName, + }))} + placeholder="Select fingerprint fields" + value={field.value?.map((value: string) => ({ + value, + label: value, + }))} + onChange={(selectedOptions) => { + field.onChange( + selectedOptions.map( + (option: { value: string }) => option.value + ) + ); + }} + noOptionsMessage={() => + selectedProviderType + ? "No options" + : "Please choose provider to see available fields" + } + /> + )} + /> + {errors.fingerprint_fields && ( +

+ {errors.fingerprint_fields.message} +

+ )} +
+
+
+ ( + + )} + /> + + Full deduplication + + + + + 1. Full deduplication: Keep will discard events if + they are the same (excluding the 'Ignore + Fields'). +
+ 2. Partial deduplication (default): Uses specified + fields to correlate alerts. E.g., two alerts with same + 'service' and 'env' fields will be + deduped into one alert. +
+
+
+
+
+
+ + {fullDeduplication && ( +
+ + Ignore fields + + ( +