mirror of
https://github.com/grafana/grafana.git
synced 2026-02-03 20:49:50 -05:00
Merge branch 'main' into fix/authorize-lbac-access
This commit is contained in:
commit
781f11dcb9
84 changed files with 3215 additions and 1017 deletions
|
|
@ -80,7 +80,7 @@ require (
|
|||
github.com/google/gnostic-models v0.7.1 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/grafana/alerting v0.0.0-20260129164026-85d7010c64b8 // indirect
|
||||
github.com/grafana/alerting v0.0.0-20260203165836-8b17916e8173 // indirect
|
||||
github.com/grafana/authlib v0.0.0-20260203153107-16a114a99f67 // indirect
|
||||
github.com/grafana/authlib/types v0.0.0-20260203131350-b83e80394acc // indirect
|
||||
github.com/grafana/dataplane/sdata v0.0.9 // indirect
|
||||
|
|
|
|||
|
|
@ -342,8 +342,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
|
|||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/grafana/alerting v0.0.0-20260129164026-85d7010c64b8 h1:964kdD/6Xyzr4g910nZnMtj0z16ijsvpA8Ju4sFOLjA=
|
||||
github.com/grafana/alerting v0.0.0-20260129164026-85d7010c64b8/go.mod h1:Ji0SfJChcwjgq8ljy6Y5CcYfHfAYKXjKYeysOoDS/6s=
|
||||
github.com/grafana/alerting v0.0.0-20260203165836-8b17916e8173 h1:nrQnGVRvBQK1zmg9rB6TA6tOeS0sSsUUV9JS1erkw2Q=
|
||||
github.com/grafana/alerting v0.0.0-20260203165836-8b17916e8173/go.mod h1:Ji0SfJChcwjgq8ljy6Y5CcYfHfAYKXjKYeysOoDS/6s=
|
||||
github.com/grafana/authlib v0.0.0-20260203153107-16a114a99f67 h1:4t3595k0Ef94NOlg4Br785+cTgAKa4rqeo9lMHbV1fs=
|
||||
github.com/grafana/authlib v0.0.0-20260203153107-16a114a99f67/go.mod h1:za8MGa5J9Bbgm2XorXc+FbGe72ln46OpN5o8P1uX9Og=
|
||||
github.com/grafana/authlib/types v0.0.0-20260203131350-b83e80394acc h1:wagsf4me4j/UFNocyMJHz5/803XpnfGJtNj8/YWy0j0=
|
||||
|
|
|
|||
|
|
@ -98,7 +98,7 @@ require (
|
|||
github.com/google/gnostic-models v0.7.1 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/grafana/alerting v0.0.0-20260129164026-85d7010c64b8 // indirect
|
||||
github.com/grafana/alerting v0.0.0-20260203165836-8b17916e8173 // indirect
|
||||
github.com/grafana/authlib v0.0.0-20260203153107-16a114a99f67 // indirect
|
||||
github.com/grafana/authlib/types v0.0.0-20260203131350-b83e80394acc // indirect
|
||||
github.com/grafana/dataplane/sdata v0.0.9 // indirect
|
||||
|
|
|
|||
|
|
@ -217,8 +217,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
|||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grafana/alerting v0.0.0-20260129164026-85d7010c64b8 h1:964kdD/6Xyzr4g910nZnMtj0z16ijsvpA8Ju4sFOLjA=
|
||||
github.com/grafana/alerting v0.0.0-20260129164026-85d7010c64b8/go.mod h1:Ji0SfJChcwjgq8ljy6Y5CcYfHfAYKXjKYeysOoDS/6s=
|
||||
github.com/grafana/alerting v0.0.0-20260203165836-8b17916e8173 h1:nrQnGVRvBQK1zmg9rB6TA6tOeS0sSsUUV9JS1erkw2Q=
|
||||
github.com/grafana/alerting v0.0.0-20260203165836-8b17916e8173/go.mod h1:Ji0SfJChcwjgq8ljy6Y5CcYfHfAYKXjKYeysOoDS/6s=
|
||||
github.com/grafana/authlib v0.0.0-20260203153107-16a114a99f67 h1:4t3595k0Ef94NOlg4Br785+cTgAKa4rqeo9lMHbV1fs=
|
||||
github.com/grafana/authlib v0.0.0-20260203153107-16a114a99f67/go.mod h1:za8MGa5J9Bbgm2XorXc+FbGe72ln46OpN5o8P1uX9Og=
|
||||
github.com/grafana/authlib/types v0.0.0-20260203131350-b83e80394acc h1:wagsf4me4j/UFNocyMJHz5/803XpnfGJtNj8/YWy0j0=
|
||||
|
|
|
|||
|
|
@ -19,97 +19,6 @@ labels:
|
|||
menuTitle: Amazon CloudWatch
|
||||
title: Amazon CloudWatch data source
|
||||
weight: 200
|
||||
refs:
|
||||
logs:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
configure-grafana-aws:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
build-dashboards:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
configure-cloudwatch:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/
|
||||
cloudwatch-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/
|
||||
cloudwatch-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/
|
||||
cloudwatch-aws-authentication:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
query-caching:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/#query-and-resource-caching
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/#query-and-resource-caching
|
||||
variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/dashboards/variables/
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/dashboards/build-dashboards/annotate-visualizations/
|
||||
set-up-grafana-monitoring:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/set-up-grafana-monitoring/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/set-up-grafana-monitoring/
|
||||
transformations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/query-transform-data/transform-data/
|
||||
visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/visualizations/
|
||||
cloudwatch-troubleshooting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/troubleshooting/
|
||||
---
|
||||
|
||||
# Amazon CloudWatch data source
|
||||
|
|
@ -120,11 +29,11 @@ Grafana includes native support for the Amazon CloudWatch plugin, so there's no
|
|||
|
||||
The following documents will help you get started working with the CloudWatch data source:
|
||||
|
||||
- [Configure the CloudWatch data source](ref:configure-cloudwatch)
|
||||
- [CloudWatch query editor](ref:cloudwatch-query-editor)
|
||||
- [Templates and variables](ref:cloudwatch-template-variables)
|
||||
- [Configure AWS authentication](ref:cloudwatch-aws-authentication)
|
||||
- [Troubleshoot CloudWatch issues](ref:cloudwatch-troubleshooting)
|
||||
- [Configure the CloudWatch data source](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/)
|
||||
- [CloudWatch query editor](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/)
|
||||
- [Templates and variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/)
|
||||
- [Configure AWS authentication](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/)
|
||||
- [Troubleshoot CloudWatch issues](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/troubleshooting/)
|
||||
|
||||
## Import pre-configured dashboards
|
||||
|
||||
|
|
@ -145,7 +54,7 @@ To import curated dashboards:
|
|||
|
||||
1. Click **Import** for each dashboard you want to import.
|
||||
|
||||
 CloudWatch pre-configured dashboards
|
||||

|
||||
|
||||
To customize one of these dashboards, Grafana recommends saving it under a different name; otherwise, Grafana upgrades will overwrite your customizations with the new version.
|
||||
|
||||
|
|
@ -153,12 +62,12 @@ To customize one of these dashboards, Grafana recommends saving it under a diffe
|
|||
|
||||
After installing and configuring the Amazon CloudWatch data source, you can:
|
||||
|
||||
- Create a wide variety of [visualizations](ref:visualizations)
|
||||
- Configure and use [templates and variables](ref:variables)
|
||||
- Add [transformations](ref:transformations)
|
||||
- Add [annotations](ref:annotate-visualizations)
|
||||
- Set up [alerting](ref:alerting)
|
||||
- Optimize performance with [query caching](ref:query-caching)
|
||||
- Create a wide variety of [visualizations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/)
|
||||
- Configure and use [templates and variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/dashboards/variables/)
|
||||
- Add [transformations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/)
|
||||
- Add [annotations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/)
|
||||
- Set up [alerting](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/alerting/)
|
||||
- Optimize performance with [query caching](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/#query-and-resource-caching)
|
||||
|
||||
## Control pricing
|
||||
|
||||
|
|
|
|||
|
|
@ -16,17 +16,6 @@ labels:
|
|||
menuTitle: AWS authentication
|
||||
title: Configure AWS authentication
|
||||
weight: 400
|
||||
refs:
|
||||
configure-grafana-assume-role-enabled:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#assume_role_enabled
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#assume_role_enabled
|
||||
configure-grafana-allowed-auth-providers:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#allowed_auth_providers
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#allowed_auth_providers
|
||||
---
|
||||
|
||||
# Configure AWS authentication
|
||||
|
|
@ -49,7 +38,7 @@ This document explores the following topics:
|
|||
|
||||
Available authentication methods depend on your configuration and the environment where Grafana runs.
|
||||
|
||||
Open source Grafana enables the `AWS SDK Default`, `Credentials file`, and `Access and secret key` methods by default. Cloud Grafana enables only `Access and secret key` by default. Users with server configuration access can enable or disable specific auth providers as needed. For more information, refer to the [`allowed_auth_providers` documentation](ref:configure-grafana-allowed-auth-providers).
|
||||
Open source Grafana enables the `AWS SDK Default`, `Credentials file`, and `Access and secret key` methods by default. Cloud Grafana enables only `Access and secret key` by default. Users with server configuration access can enable or disable specific auth providers as needed. For more information, refer to the [`allowed_auth_providers` documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#allowed_auth_providers).
|
||||
|
||||
- `AWS SDK Default` uses the [default provider](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) from the [AWS SDK for Go](https://github.com/aws/aws-sdk-go) without custom configuration.
|
||||
This method requires configuring AWS credentials outside Grafana through [the CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html), or by [attaching credentials directly to an EC2 instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html), [in an ECS task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html), or for a [Service Account in a Kubernetes cluster](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). You can attach permissions directly to the data source with AWS SDK Default or combine it with the optional [`Assume Role ARN`](#assume-a-role) field.
|
||||
|
|
@ -76,7 +65,7 @@ Instead, assume role functionality lets you use one set of AWS credentials acros
|
|||
|
||||
If the **Assume Role ARN** field is left empty, Grafana uses the provided credentials from the selected authentication method directly, and permissions to AWS data must be attached directly to those credentials. The **Assume Role ARN** field is optional for all authentication methods except for Grafana Assume Role.
|
||||
|
||||
To disable this feature in open source Grafana or Grafana Enterprise, refer to [`assume_role_enabled`](ref:configure-grafana-assume-role-enabled).
|
||||
To disable this feature in open source Grafana or Grafana Enterprise, refer to [`assume_role_enabled`](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#assume_role_enabled).
|
||||
|
||||
### Use an external ID
|
||||
|
||||
|
|
|
|||
|
|
@ -20,47 +20,11 @@ labels:
|
|||
menuTitle: Configure
|
||||
title: Configure CloudWatch
|
||||
weight: 100
|
||||
refs:
|
||||
logs:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
configure-grafana-aws:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
cloudwatch-aws-authentication:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
---
|
||||
|
||||
# Configure the Amazon CloudWatch data source
|
||||
|
||||
This document provides instructions for configuring the Amazon CloudWatch data source and explains available configuration options. For general information on adding and managing data sources, refer to [Data source management](ref:data-source-management).
|
||||
This document provides instructions for configuring the Amazon CloudWatch data source and explains available configuration options. For general information on adding and managing data sources, refer to [Data source management](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/).
|
||||
|
||||
## Before you begin
|
||||
|
||||
|
|
@ -94,7 +58,7 @@ The following are configuration options for the CloudWatch data source.
|
|||
Grafana plugin requests to AWS are made on behalf of an AWS Identity and Access Management (IAM) role or IAM user.
|
||||
The IAM user or IAM role must have the associated policies to perform certain API actions.
|
||||
|
||||
For authentication options and configuration details, refer to [AWS authentication](ref:cloudwatch-aws-authentication).
|
||||
For authentication options and configuration details, refer to [AWS authentication](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/).
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
|
|
@ -137,15 +101,15 @@ You must use both an access key ID and a secret access key to authenticate.
|
|||
| --------------- | ----------------------------------------------------- |
|
||||
| **Data source** | Select the X-Ray data source from the drop-down menu. |
|
||||
|
||||
Grafana automatically creates a link to a trace in X-Ray data source if logs contain the `@xrayTraceId` field. To use this feature, you must already have an X-Ray data source configured. For details, see the [X-Ray data source docs](/grafana/plugins/grafana-X-Ray-datasource/). To view the X-Ray link, select the log row in either the Explore view or dashboard [Logs panel](ref:logs) to view the log details section.
|
||||
Grafana automatically creates a link to a trace in X-Ray data source if logs contain the `@xrayTraceId` field. To use this feature, you must already have an X-Ray data source configured. For details, see the [X-Ray data source docs](/grafana/plugins/grafana-X-Ray-datasource/). To view the X-Ray link, select the log row in either the Explore view or dashboard [Logs panel](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/) to view the log details section.
|
||||
|
||||
To log the `@xrayTraceId`, refer to the [AWS X-Ray documentation](https://docs.aws.amazon.com/xray/latest/devguide/xray-services.html). To provide the field to Grafana, your log queries must also contain the `@xrayTraceId` field, for example by using the query `fields @message, @xrayTraceId`.
|
||||
|
||||
**Private data source connect** - _Only for Grafana Cloud users._
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Private data source connect** | Establishes a private, secured connection between a Grafana Cloud stack and data sources within a private network. Use the drop-down to locate the PDC URL. For setup instructions, refer to [Private data source connect (PDC)](ref:private-data-source-connect) and [Configure PDC](ref:configure-pdc). Click **Manage private data source connect** to open your PDC connection page and view your configuration details. |
|
||||
| Setting | Description |
|
||||
| ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Private data source connect** | Establishes a private, secured connection between a Grafana Cloud stack and data sources within a private network. Use the drop-down to locate the PDC URL. For setup instructions, refer to [Private data source connect (PDC)](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) and [Configure PDC](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc). Click **Manage private data source connect** to open your PDC connection page and view your configuration details. |
|
||||
|
||||
After configuring your Amazon CloudWatch data source options, click **Save & test** at the bottom to test the connection. You should see a confirmation dialog box that says:
|
||||
|
||||
|
|
@ -158,7 +122,7 @@ To troubleshoot issues while setting up the CloudWatch data source, check the `/
|
|||
### IAM policy examples
|
||||
|
||||
To read CloudWatch metrics and EC2 tags, instances, regions, and alarms, you must grant Grafana permissions via IAM.
|
||||
You can attach these permissions to the IAM role or IAM user you configured in [AWS authentication](ref:cloudwatch-aws-authentication).
|
||||
You can attach these permissions to the IAM role or IAM user you configured in [AWS authentication](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/).
|
||||
|
||||
**Metrics-only permissions:**
|
||||
|
||||
|
|
@ -309,7 +273,7 @@ You can attach these permissions to the IAM role or IAM user you configured in [
|
|||
Cross-account observability lets you retrieve metrics and logs across different accounts in a single region, but you can't query EC2 Instance Attributes across accounts because those come from the EC2 API and not the CloudWatch API.
|
||||
{{< /admonition >}}
|
||||
|
||||
For more information on configuring authentication, refer to [Configure AWS authentication](ref:cloudwatch-aws-authentication).
|
||||
For more information on configuring authentication, refer to [Configure AWS authentication](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/).
|
||||
|
||||
### CloudWatch Logs data protection
|
||||
|
||||
|
|
@ -317,7 +281,7 @@ CloudWatch Logs can protect data by applying log group data protection policies.
|
|||
|
||||
### Configure the data source with grafana.ini
|
||||
|
||||
The Grafana [configuration file](ref:configure-grafana-aws) includes an `AWS` section where you can configure data source options:
|
||||
The Grafana [configuration file](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws) includes an `AWS` section where you can configure data source options:
|
||||
|
||||
| Configuration option | Description |
|
||||
| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
|
|
@ -328,7 +292,7 @@ The Grafana [configuration file](ref:configure-grafana-aws) includes an `AWS` se
|
|||
### Provision the data source
|
||||
|
||||
You can define and configure the data source in YAML files as part of the Grafana provisioning system.
|
||||
For more information about provisioning and available configuration options, refer to [Provision Grafana](ref:provisioning-data-sources).
|
||||
For more information about provisioning and available configuration options, refer to [Provision Grafana](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources).
|
||||
|
||||
**Using AWS SDK (default)**:
|
||||
|
||||
|
|
|
|||
|
|
@ -18,37 +18,11 @@ labels:
|
|||
menuTitle: Query editor
|
||||
title: Amazon CloudWatch query editor
|
||||
weight: 200
|
||||
refs:
|
||||
query-transform-data:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
query-transform-data-navigate-the-query-tab:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/#navigate-the-query-tab
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/#navigate-the-query-tab
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
add-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
---
|
||||
|
||||
# Amazon CloudWatch query editor
|
||||
|
||||
Grafana provides a query editor for the CloudWatch data source, which allows you to query, visualize, and alert on logs and metrics stored in Amazon CloudWatch. It is located on the [Explore](ref:explore) page. For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data).
|
||||
Grafana provides a query editor for the CloudWatch data source, which allows you to query, visualize, and alert on logs and metrics stored in Amazon CloudWatch. It is located on the [Explore](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/explore/) page. For general documentation on querying data sources in Grafana, refer to [Query and transform data](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/).
|
||||
|
||||
## Choose a query editing mode
|
||||
|
||||
|
|
@ -142,7 +116,7 @@ The query returns the average CPU utilization for all EC2 instances in the defau
|
|||
|
||||
Auto-scaling events add new instances to the graph without manual instance ID tracking. This feature supports up to 100 metrics.
|
||||
|
||||
Click the [**Query inspector**](ref:query-transform-data-navigate-the-query-tab) button and select **Meta Data** to see the search expression that's automatically built to support wildcards.
|
||||
Click the [**Query inspector**](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/#navigate-the-query-tab) button and select **Meta Data** to see the search expression that's automatically built to support wildcards.
|
||||
|
||||
To learn more about search expressions, refer to the [CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/search-expression-syntax.html).
|
||||
The search expression is defined by default in such a way that the queried metrics must match the defined dimension names exactly.
|
||||
|
|
@ -212,7 +186,7 @@ For details about the Metrics Insights syntax, refer to the [AWS reference docum
|
|||
|
||||
For information about Metrics Insights limits, refer to the [AWS feature documentation](https://docs.aws.amazon.com/console/cloudwatch/metricsinsights).
|
||||
|
||||
You can also augment queries by using [template variables](ref:add-template-variables).
|
||||
You can also augment queries by using [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/).
|
||||
|
||||
### Use Metrics Insights keywords
|
||||
|
||||
|
|
@ -299,7 +273,7 @@ WHERE `@message` LIKE '%Exception%'
|
|||
To reference log groups in a monitoring account, use ARNs instead of LogGroup names.
|
||||
|
||||
You can also write queries returning time series data by using the [`stats` command](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_Insights-Visualizing-Log-Data.html).
|
||||
When making `stats` queries in [Explore](ref:explore), ensure you are in Metrics Explore mode.
|
||||
When making `stats` queries in [Explore](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/explore/), ensure you are in Metrics Explore mode.
|
||||
|
||||
### Create queries for alerting
|
||||
|
||||
|
|
@ -318,7 +292,7 @@ filter @message like /Exception/
|
|||
If you receive an error like `input data must be a wide series but got ...` when trying to alert on a query, make sure that your query returns valid numeric data that can be output to a Time series panel.
|
||||
{{< /admonition >}}
|
||||
|
||||
For more information on Grafana alerts, refer to [Alerting](ref:alerting).
|
||||
For more information on Grafana alerts, refer to [Alerting](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/alerting/).
|
||||
|
||||
## Cross-account observability
|
||||
|
||||
|
|
|
|||
|
|
@ -17,22 +17,6 @@ labels:
|
|||
menuTitle: Template variables
|
||||
title: CloudWatch template variables
|
||||
weight: 300
|
||||
refs:
|
||||
variable-syntax:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/variable-syntax/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/variable-syntax/
|
||||
add-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
---
|
||||
|
||||
# CloudWatch template variables
|
||||
|
|
@ -42,7 +26,7 @@ Grafana lists these variables in drop-down select boxes at the top of the dashbo
|
|||
|
||||
<!-- Grafana refers to such variables as template variables. -->
|
||||
|
||||
For an introduction to templating and template variables, refer to [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables).
|
||||
For an introduction to templating and template variables, refer to [Templating](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/dashboards/variables/) and [Add and manage variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/).
|
||||
|
||||
## Use query variables
|
||||
|
||||
|
|
@ -69,7 +53,7 @@ For details about the metrics CloudWatch provides, refer to the [CloudWatch docu
|
|||
### Use variables in queries
|
||||
|
||||
Use the Grafana variable syntax to include variables in queries. A query variable dynamically retrieves values from your data source using a query.
|
||||
For details, refer to the [variable syntax documentation](ref:variable-syntax).
|
||||
For details, refer to the [variable syntax documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/dashboards/variables/variable-syntax/).
|
||||
|
||||
## Use ec2_instance_attribute
|
||||
|
||||
|
|
|
|||
|
|
@ -18,37 +18,11 @@ labels:
|
|||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot Amazon CloudWatch data source issues
|
||||
weight: 500
|
||||
refs:
|
||||
configure-cloudwatch:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/
|
||||
cloudwatch-aws-authentication:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
cloudwatch-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/
|
||||
cloudwatch-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
---
|
||||
|
||||
# Troubleshoot Amazon CloudWatch data source issues
|
||||
|
||||
This document provides solutions to common issues you may encounter when configuring or using the Amazon CloudWatch data source. For configuration instructions, refer to [Configure CloudWatch](ref:configure-cloudwatch).
|
||||
This document provides solutions to common issues you may encounter when configuring or using the Amazon CloudWatch data source. For configuration instructions, refer to [Configure CloudWatch](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The data source health check validates both metrics and logs permissions. If your IAM policy only grants access to one of these (for example, metrics-only or logs-only), the health check displays a red status. However, the service you have permissions for is still usable—you can query metrics or logs based on whichever permissions are configured.
|
||||
|
|
@ -68,13 +42,13 @@ These errors occur when AWS credentials are invalid, missing, or don't have the
|
|||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| --------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| IAM policy missing required permissions | Attach the appropriate IAM policy to your user or role. For metrics, you need `cloudwatch:ListMetrics`, `cloudwatch:GetMetricData`, and related permissions. For logs, you need `logs:DescribeLogGroups`, `logs:StartQuery`, `logs:GetQueryResults`, and related permissions. Refer to [Configure CloudWatch](ref:configure-cloudwatch) for complete policy examples. |
|
||||
| Incorrect access key or secret key | Verify the credentials in the AWS Console under **IAM** > **Users** > your user > **Security credentials**. Generate new credentials if necessary. |
|
||||
| Credentials have expired | For temporary credentials, generate new ones. For access keys, verify they haven't been deactivated or deleted. |
|
||||
| Wrong AWS region | Verify the default region in the data source configuration matches where your resources are located. |
|
||||
| Assume Role ARN is incorrect | Verify the role ARN format: `arn:aws:iam::<account-id>:role/<role-name>`. Check that the role exists in the AWS Console. |
|
||||
| Cause | Solution |
|
||||
| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| IAM policy missing required permissions | Attach the appropriate IAM policy to your user or role. For metrics, you need `cloudwatch:ListMetrics`, `cloudwatch:GetMetricData`, and related permissions. For logs, you need `logs:DescribeLogGroups`, `logs:StartQuery`, `logs:GetQueryResults`, and related permissions. Refer to [Configure CloudWatch](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/) for complete policy examples. |
|
||||
| Incorrect access key or secret key | Verify the credentials in the AWS Console under **IAM** > **Users** > your user > **Security credentials**. Generate new credentials if necessary. |
|
||||
| Credentials have expired | For temporary credentials, generate new ones. For access keys, verify they haven't been deactivated or deleted. |
|
||||
| Wrong AWS region | Verify the default region in the data source configuration matches where your resources are located. |
|
||||
| Assume Role ARN is incorrect | Verify the role ARN format: `arn:aws:iam::<account-id>:role/<role-name>`. Check that the role exists in the AWS Console. |
|
||||
|
||||
### "Unable to assume role"
|
||||
|
||||
|
|
@ -130,7 +104,7 @@ These errors occur when AWS credentials are invalid, missing, or don't have the
|
|||
- ECS task role (if running in ECS)
|
||||
- EKS service account (if running in EKS)
|
||||
1. Ensure the Grafana process has permission to read the credentials file.
|
||||
1. For EKS with IRSA, set the pod's security context to allow user 472 (grafana) to access the projected token. Refer to [AWS authentication](ref:cloudwatch-aws-authentication) for details.
|
||||
1. For EKS with IRSA, set the pod's security context to allow user 472 (grafana) to access the projected token. Refer to [AWS authentication](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/) for details.
|
||||
|
||||
### Credentials file not found
|
||||
|
||||
|
|
@ -163,7 +137,7 @@ These errors occur when Grafana cannot reach AWS CloudWatch endpoints.
|
|||
1. Verify network connectivity from the Grafana server to AWS endpoints.
|
||||
1. Check firewall rules allow outbound HTTPS (port 443) to AWS services.
|
||||
1. If using a VPC, ensure proper NAT gateway or VPC endpoint configuration.
|
||||
1. For Grafana Cloud connecting to private resources, configure [Private data source connect](ref:private-data-source-connect).
|
||||
1. For Grafana Cloud connecting to private resources, configure [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/).
|
||||
1. Check if the default region is correct—incorrect regions may cause longer timeouts.
|
||||
1. Increase the timeout settings if queries involve large data volumes.
|
||||
|
||||
|
|
@ -360,7 +334,7 @@ These errors occur when using template variables with the CloudWatch data source
|
|||
1. For dependent variables, ensure parent variables have valid selections.
|
||||
1. Verify the region is set correctly (use "default" for the data source's default region).
|
||||
|
||||
For more information on template variables, refer to [CloudWatch template variables](ref:cloudwatch-template-variables).
|
||||
For more information on template variables, refer to [CloudWatch template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/).
|
||||
|
||||
### Multi-value template variables cause query failures
|
||||
|
||||
|
|
|
|||
|
|
@ -8,6 +8,8 @@ keywords:
|
|||
- grafana
|
||||
- opentsdb
|
||||
- guide
|
||||
- time series
|
||||
- tsdb
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
|
|
@ -16,134 +18,90 @@ labels:
|
|||
menuTitle: OpenTSDB
|
||||
title: OpenTSDB data source
|
||||
weight: 1100
|
||||
last_reviewed: 2026-01-28
|
||||
refs:
|
||||
provisioning-data-sources:
|
||||
configure-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
variables:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/configure/
|
||||
query-editor-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
data-source-management:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/query-editor/
|
||||
template-variables-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/template-variables/
|
||||
alerting-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/alerting/
|
||||
annotations-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/annotations/
|
||||
troubleshooting-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/troubleshooting/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
---
|
||||
|
||||
# OpenTSDB data source
|
||||
|
||||
Grafana ships with advanced support for OpenTSDB.
|
||||
This topic explains configuration, variables, querying, and other features specific to the OpenTSDB data source.
|
||||
Grafana ships with support for OpenTSDB, an open source time series database built on top of HBase. Use the OpenTSDB data source to visualize metrics, create alerts, and build dashboards from your time series data.
|
||||
|
||||
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:data-source-management).
|
||||
Only users with the organization administrator role can add data sources.
|
||||
Administrators can also [configure the data source via YAML](#provision-the-data-source) with Grafana's provisioning system.
|
||||
## Supported features
|
||||
|
||||
## OpenTSDB settings
|
||||
The OpenTSDB data source supports the following features:
|
||||
|
||||
To configure basic settings for the data source, complete the following steps:
|
||||
| Feature | Supported | Notes |
|
||||
| ------------------ | --------- | -------------------------------------------------------------------- |
|
||||
| Metrics queries | Yes | Query time series data with aggregation, downsampling, and filtering |
|
||||
| Alerting | Yes | Create alert rules based on OpenTSDB queries |
|
||||
| Annotations | Yes | Overlay events on graphs using metric-specific or global annotations |
|
||||
| Template variables | Yes | Use dynamic variables in queries |
|
||||
| Explore | Yes | Ad-hoc data exploration without dashboards |
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under Your connections, click **Data sources**.
|
||||
1. Enter `OpenTSDB` in the search bar.
|
||||
1. Select **OpenTSDB**.
|
||||
## Supported OpenTSDB versions
|
||||
|
||||
The **Settings** tab of the data source is displayed.
|
||||
The data source supports OpenTSDB versions 2.1 through 2.4. Some features are version-specific:
|
||||
|
||||
1. Set the data source's basic configuration options:
|
||||
| Feature | Minimum version |
|
||||
| ------------- | --------------- |
|
||||
| Filters | 2.2 |
|
||||
| Fill policies | 2.2 |
|
||||
| Explicit tags | 2.3 |
|
||||
|
||||
| Name | Description |
|
||||
| ------------------- | ---------------------------------------------------------------------------------------- |
|
||||
| **Name** | The data source name. This is how you refer to the data source in panels and queries. |
|
||||
| **Default** | Default data source that will be pre-selected for new panels. |
|
||||
| **URL** | The HTTP protocol, IP, and port of your OpenTSDB server (default port is usually 4242). |
|
||||
| **Allowed cookies** | Listing of cookies to forward to the data source. |
|
||||
| **Version** | The OpenTSDB version (supported versions are: 2.4, 2.3, 2.2 and versions less than 2.1). |
|
||||
| **Resolution** | Metrics from OpenTSDB may have data points with either second or millisecond resolution. |
|
||||
| **Lookup limit** | Default is 1000. |
|
||||
## Get started
|
||||
|
||||
### Provision the data source
|
||||
The following documents help you get started with the OpenTSDB data source:
|
||||
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
- [Configure the OpenTSDB data source](ref:configure-opentsdb) - Set up authentication and connect to OpenTSDB.
|
||||
- [OpenTSDB query editor](ref:query-editor-opentsdb) - Create and edit queries with aggregation, downsampling, and filtering.
|
||||
- [Template variables](ref:template-variables-opentsdb) - Create dynamic dashboards with OpenTSDB variables.
|
||||
- [Troubleshooting](ref:troubleshooting-opentsdb) - Solve common configuration and query errors.
|
||||
|
||||
#### Provisioning example
|
||||
## Additional features
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
After you have configured the OpenTSDB data source, you can:
|
||||
|
||||
datasources:
|
||||
- name: OpenTSDB
|
||||
type: opentsdb
|
||||
access: proxy
|
||||
url: http://localhost:4242
|
||||
jsonData:
|
||||
tsdbResolution: 1
|
||||
tsdbVersion: 1
|
||||
```
|
||||
- Add [Annotations](ref:annotations-opentsdb) to overlay OpenTSDB events on your graphs.
|
||||
- Configure and use [Template variables](ref:template-variables-opentsdb) for dynamic dashboards.
|
||||
- Set up [Alerting](ref:alerting-opentsdb) rules based on your time series queries.
|
||||
- Use [Explore](ref:explore) to investigate your OpenTSDB data without building a dashboard.
|
||||
|
||||
## Query editor
|
||||
## Related resources
|
||||
|
||||
Open a graph in edit mode by clicking the title. The query editor will differ if the data source has version <=2.1 or = 2.2.
|
||||
In the former version, only tags can be used to query OpenTSDB. But in the latter version, filters as well as tags
|
||||
can be used to query OpenTSDB. Fill Policy is also introduced in OpenTSDB 2.2.
|
||||
|
||||

|
||||
|
||||
{{< admonition type="note" >}}
|
||||
While using the OpenTSDB 2.2 data source, make sure you use either Filters or Tags, as they are mutually exclusive. If used together, they might give you unexpected results.
|
||||
{{< /admonition >}}
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
When using OpenTSDB 2.4 with alerting, queries are executed with the parameter `arrays=true`. This causes OpenTSDB to return data points as an array of arrays instead of a map of key-value pairs. Grafana then converts this data into the appropriate data frame format.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Auto complete suggestions
|
||||
|
||||
As you begin typing metric names, tag names, or tag values, highlighted autocomplete suggestions will appear.
|
||||
The autocomplete only works if the OpenTSDB suggest API is enabled.
|
||||
|
||||
## Templating queries
|
||||
|
||||
Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
|
||||
Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
|
||||
being displayed in your dashboard.
|
||||
|
||||
Check out the [Templating](ref:variables) documentation for an introduction to the templating feature and the different
|
||||
types of template variables.
|
||||
|
||||
### Query variable
|
||||
|
||||
Grafana's OpenTSDB data source supports template variable queries. This means you can create template variables
|
||||
that fetch the values from OpenTSDB. For example, metric names, tag names, or tag values.
|
||||
|
||||
When using OpenTSDB with a template variable of `query` type, you can use the following syntax for lookup.
|
||||
|
||||
| Query | Description |
|
||||
| --------------------------- | --------------------------------------------------------------------------------- |
|
||||
| `metrics(prefix)` | Returns metric names with specific prefix (can be empty) |
|
||||
| `tag_names(cpu)` | Returns tag names (i.e. keys) for a specific cpu metric |
|
||||
| `tag_values(cpu, hostname)` | Returns tag values for metric cpu and tag key hostname |
|
||||
| `suggest_tagk(prefix)` | Returns tag names (i.e. keys) for all metrics with specific prefix (can be empty) |
|
||||
| `suggest_tagv(prefix)` | Returns tag values for all metrics with specific prefix (can be empty) |
|
||||
|
||||
If you do not see template variables being populated in `Preview of values` section, you need to enable
|
||||
`tsd.core.meta.enable_realtime_ts` in the OpenTSDB server settings. Also, to populate metadata of
|
||||
the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` on the OpenTSDB server.
|
||||
|
||||
### Nested templating
|
||||
|
||||
One template variable can be used to filter tag values for another template variable. First parameter is the metric name,
|
||||
second parameter is the tag key for which you need to find tag values, and after that all other dependent template variables.
|
||||
Some examples are mentioned below to make nested template queries work successfully.
|
||||
|
||||
| Query | Description |
|
||||
| ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------- |
|
||||
| `tag_values(cpu, hostname, env=$env)` | Return tag values for cpu metric, selected env tag value and tag key hostname |
|
||||
| `tag_values(cpu, hostname, env=$env, region=$region)` | Return tag values for cpu metric, selected env tag value, selected region tag value and tag key hostname |
|
||||
|
||||
For details on OpenTSDB metric queries, check out the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)
|
||||
- [Official OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)
|
||||
- [Grafana community forums](https://community.grafana.com/)
|
||||
|
|
|
|||
195
docs/sources/datasources/opentsdb/alerting/index.md
Normal file
195
docs/sources/datasources/opentsdb/alerting/index.md
Normal file
|
|
@ -0,0 +1,195 @@
|
|||
---
|
||||
description: Use Grafana Alerting with the OpenTSDB data source
|
||||
keywords:
|
||||
- grafana
|
||||
- opentsdb
|
||||
- alerting
|
||||
- alerts
|
||||
- notifications
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Alerting
|
||||
title: OpenTSDB alerting
|
||||
weight: 400
|
||||
last_reviewed: 2026-01-28
|
||||
refs:
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
create-alert-rule:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
configure-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/configure/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/query-editor/
|
||||
troubleshooting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/troubleshooting/
|
||||
---
|
||||
|
||||
# OpenTSDB alerting
|
||||
|
||||
You can use Grafana Alerting with OpenTSDB to create alerts based on your time series data. This allows you to monitor metrics, detect anomalies, and receive notifications when specific conditions are met.
|
||||
|
||||
For general information about Grafana Alerting, refer to [Grafana Alerting](ref:alerting).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before creating alerts with OpenTSDB, ensure you have:
|
||||
|
||||
- An OpenTSDB data source configured in Grafana. Refer to [Configure the OpenTSDB data source](ref:configure-opentsdb).
|
||||
- Appropriate permissions to create alert rules.
|
||||
- Understanding of the metrics you want to monitor.
|
||||
|
||||
## Supported features
|
||||
|
||||
OpenTSDB alerting works with standard metric queries that return time series data. The following table summarizes alerting compatibility:
|
||||
|
||||
| Query type | Alerting support | Notes |
|
||||
| ----------------------------- | ---------------- | ------------------------------------ |
|
||||
| Metrics with aggregation | Yes | Recommended for alerting |
|
||||
| Metrics with downsampling | Yes | Use appropriate intervals |
|
||||
| Metrics with rate calculation | Yes | Useful for counter metrics |
|
||||
| Metrics with filters/tags | Yes | Filter to specific hosts or services |
|
||||
|
||||
## Create an alert rule
|
||||
|
||||
To create an alert rule using OpenTSDB:
|
||||
|
||||
1. Navigate to **Alerting** > **Alert rules**.
|
||||
1. Click **New alert rule**.
|
||||
1. Enter a name for the alert rule.
|
||||
1. Select your **OpenTSDB** data source.
|
||||
1. Build your query:
|
||||
- Select the metric to monitor.
|
||||
- Choose an appropriate aggregator (for example, `avg`, `sum`, `max`).
|
||||
- Add tag filters to target specific resources.
|
||||
- Enable downsampling with an interval matching your evaluation frequency.
|
||||
1. Configure the alert condition (for example, when the value is above a threshold).
|
||||
1. Set the evaluation interval and pending period.
|
||||
1. Configure notifications and labels.
|
||||
1. Click **Save rule**.
|
||||
|
||||
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
|
||||
|
||||
## Example alert queries
|
||||
|
||||
The following examples show common alerting scenarios with OpenTSDB.
|
||||
|
||||
### Alert on high CPU usage
|
||||
|
||||
Monitor CPU usage and alert when it exceeds 90%:
|
||||
|
||||
| Field | Value |
|
||||
| --------------------- | -------------- |
|
||||
| Metric | `sys.cpu.user` |
|
||||
| Aggregator | `avg` |
|
||||
| Tags | `host=*` |
|
||||
| Downsample Interval | `1m` |
|
||||
| Downsample Aggregator | `avg` |
|
||||
|
||||
**Condition:** When average is above `90`
|
||||
|
||||
### Alert on low disk space
|
||||
|
||||
Monitor available disk space and alert when it drops below a threshold:
|
||||
|
||||
| Field | Value |
|
||||
| --------------------- | ------------------- |
|
||||
| Metric | `sys.disk.free` |
|
||||
| Aggregator | `min` |
|
||||
| Tags | `host=*`, `mount=/` |
|
||||
| Downsample Interval | `5m` |
|
||||
| Downsample Aggregator | `min` |
|
||||
|
||||
**Condition:** When minimum is below `10737418240` (10 GB in bytes)
|
||||
|
||||
### Alert on high network traffic rate
|
||||
|
||||
Monitor network bytes received and alert on high traffic:
|
||||
|
||||
| Field | Value |
|
||||
| --------------------- | -------------------- |
|
||||
| Metric | `net.bytes.received` |
|
||||
| Aggregator | `sum` |
|
||||
| Tags | `host=webserver01` |
|
||||
| Rate | enabled |
|
||||
| Counter | enabled |
|
||||
| Downsample Interval | `1m` |
|
||||
| Downsample Aggregator | `avg` |
|
||||
|
||||
**Condition:** When sum is above `104857600` (100 MB/s in bytes)
|
||||
|
||||
### Alert on error count spike
|
||||
|
||||
Monitor application error counts:
|
||||
|
||||
| Field | Value |
|
||||
| --------------------- | ------------------------------- |
|
||||
| Metric | `app.errors.count` |
|
||||
| Aggregator | `sum` |
|
||||
| Tags | `service=api`, `env=production` |
|
||||
| Downsample Interval | `5m` |
|
||||
| Downsample Aggregator | `sum` |
|
||||
|
||||
**Condition:** When sum is above `100`
|
||||
|
||||
## Limitations
|
||||
|
||||
When using OpenTSDB with Grafana Alerting, be aware of the following limitations.
|
||||
|
||||
### Template variables not supported
|
||||
|
||||
Alert queries can't contain template variables. Grafana evaluates alert rules on the backend without dashboard context, so variables like `$hostname` or `$environment` aren't resolved.
|
||||
|
||||
If your dashboard query uses template variables, create a separate query for alerting with hard-coded values.
|
||||
|
||||
### Query complexity
|
||||
|
||||
Complex queries with many tags or long time ranges may timeout or fail to evaluate. Simplify queries for alerting by:
|
||||
|
||||
- Using specific tag filters instead of wildcards where possible.
|
||||
- Enabling downsampling with appropriate intervals.
|
||||
- Reducing the evaluation time range.
|
||||
|
||||
### OpenTSDB 2.4 behavior
|
||||
|
||||
When using OpenTSDB 2.4 with alerting, Grafana executes queries with the parameter `arrays=true`. This causes OpenTSDB to return data points as an array of arrays instead of a map of key-value pairs. Grafana automatically converts this data to the appropriate format.
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these best practices when creating OpenTSDB alerts:
|
||||
|
||||
- **Use specific tag filters:** Add tag filters to focus on relevant resources and improve query performance.
|
||||
- **Match downsample interval to evaluation:** Set the downsample interval to match or be slightly smaller than your alert evaluation interval.
|
||||
- **Test queries first:** Verify your query returns expected results in [Explore](ref:explore) before creating an alert.
|
||||
- **Set realistic thresholds:** Base alert thresholds on historical data patterns to avoid false positives.
|
||||
- **Use meaningful names:** Give alert rules descriptive names that indicate what they monitor.
|
||||
- **Enable downsampling:** Always enable downsampling for alerting queries to reduce data volume and improve reliability.
|
||||
- **Consider counter resets:** For counter metrics, enable the Counter option and set appropriate max values to handle resets correctly.
|
||||
|
||||
## Next steps
|
||||
|
||||
- [Build queries](ref:query-editor) to explore your metrics before creating alerts.
|
||||
- [Troubleshoot issues](ref:troubleshooting) if alerts aren't firing as expected.
|
||||
254
docs/sources/datasources/opentsdb/annotations/index.md
Normal file
254
docs/sources/datasources/opentsdb/annotations/index.md
Normal file
|
|
@ -0,0 +1,254 @@
|
|||
---
|
||||
description: Use annotations with the OpenTSDB data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- opentsdb
|
||||
- annotations
|
||||
- events
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Annotations
|
||||
title: OpenTSDB annotations
|
||||
weight: 450
|
||||
last_reviewed: 2026-01-28
|
||||
refs:
|
||||
annotations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/query-editor/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/template-variables/
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/alerting/
|
||||
---
|
||||
|
||||
# OpenTSDB annotations
|
||||
|
||||
Annotations allow you to overlay event information on graphs, providing context for metric changes. The OpenTSDB data source supports both metric-specific annotations and global annotations stored in OpenTSDB.
|
||||
|
||||
For general information about annotations in Grafana, refer to [Annotate visualizations](ref:annotations).
|
||||
|
||||
## Annotation types
|
||||
|
||||
OpenTSDB supports two types of annotations:
|
||||
|
||||
| Type | Description | Use case |
|
||||
| ---------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
|
||||
| **Metric annotations** | Annotations attached to a specific time series (TSUID). Retrieved by querying the associated metric. | Track events affecting a specific host or service. |
|
||||
| **Global annotations** | Annotations not tied to any time series. Apply system-wide. | Track deployments, maintenance windows, or infrastructure-wide events. |
|
||||
|
||||
## How Grafana retrieves annotations
|
||||
|
||||
When you configure an annotation query, Grafana queries OpenTSDB for the specified metric and retrieves any annotations associated with that metric's time series. The query includes the `globalAnnotations=true` parameter, which allows Grafana to also retrieve global annotations when the **Show Global Annotations** option is enabled.
|
||||
|
||||
Grafana displays the `description` field from each annotation as the annotation text.
|
||||
|
||||
## Configure an annotation query
|
||||
|
||||
To add OpenTSDB annotations to a dashboard:
|
||||
|
||||
1. Click the dashboard settings icon (gear) in the top navigation.
|
||||
1. Select **Annotations** in the left menu.
|
||||
1. Click **Add annotation query**.
|
||||
1. Select the **OpenTSDB** data source.
|
||||
1. Configure the annotation query fields as described in the following table.
|
||||
1. Click **Save dashboard**.
|
||||
|
||||
## Annotation query fields
|
||||
|
||||
| Field | Description |
|
||||
| --------------------------- | -------------------------------------------------------------------------------- |
|
||||
| **Name** | A descriptive name for this annotation query. Appears in the annotation legend. |
|
||||
| **Data source** | Select the OpenTSDB data source. |
|
||||
| **Enabled** | Toggle to enable or disable this annotation query. |
|
||||
| **OpenTSDB metrics query** | The metric name to query for annotations (for example, `events.deployment`). |
|
||||
| **Show Global Annotations** | Toggle to include global annotations that aren't tied to a specific time series. |
|
||||
|
||||
## Example annotation queries
|
||||
|
||||
The following examples demonstrate common annotation use cases.
|
||||
|
||||
### Track application deployments
|
||||
|
||||
Monitor when deployments occur for a specific application:
|
||||
|
||||
| Field | Value |
|
||||
| ----------------------- | --------------- |
|
||||
| Name | App Deployments |
|
||||
| OpenTSDB metrics query | `deploy.myapp` |
|
||||
| Show Global Annotations | disabled |
|
||||
|
||||
This query retrieves annotations attached to the `deploy.myapp` metric, showing deployment events for that specific application.
|
||||
|
||||
### Monitor infrastructure-wide events
|
||||
|
||||
Capture system-wide events such as network changes or datacenter maintenance:
|
||||
|
||||
| Field | Value |
|
||||
| ----------------------- | ----------------------- |
|
||||
| Name | Infrastructure Events |
|
||||
| OpenTSDB metrics query | `events.infrastructure` |
|
||||
| Show Global Annotations | enabled |
|
||||
|
||||
This query retrieves both metric-specific and global annotations, providing a complete picture of infrastructure events.
|
||||
|
||||
### Track incidents and outages
|
||||
|
||||
Mark incident start and resolution times:
|
||||
|
||||
| Field | Value |
|
||||
| ----------------------- | ----------------- |
|
||||
| Name | Incidents |
|
||||
| OpenTSDB metrics query | `events.incident` |
|
||||
| Show Global Annotations | enabled |
|
||||
|
||||
### Monitor configuration changes
|
||||
|
||||
Track when configuration changes are applied:
|
||||
|
||||
| Field | Value |
|
||||
| ----------------------- | --------------- |
|
||||
| Name | Config Changes |
|
||||
| OpenTSDB metrics query | `events.config` |
|
||||
| Show Global Annotations | disabled |
|
||||
|
||||
### Correlate multiple event types
|
||||
|
||||
You can add multiple annotation queries to a single dashboard to correlate different event types. For example:
|
||||
|
||||
1. Add a "Deployments" annotation query for `deploy.*` metrics.
|
||||
1. Add an "Incidents" annotation query for `events.incident`.
|
||||
1. Add a "Maintenance" annotation query with global annotations enabled.
|
||||
|
||||
This allows you to see how deployments, incidents, and maintenance windows relate to your metric data.
|
||||
|
||||
## How annotations appear
|
||||
|
||||
Annotations appear as vertical lines on time series panels at the timestamps where events occurred. Hover over an annotation marker to view:
|
||||
|
||||
- The annotation name (from your query configuration)
|
||||
- The event description (from the OpenTSDB annotation's `description` field)
|
||||
- The timestamp
|
||||
|
||||
Different annotation queries can be assigned different colors in the dashboard settings to distinguish between event types.
|
||||
|
||||
## Create annotations in OpenTSDB
|
||||
|
||||
To display annotations in Grafana, you must first create them in OpenTSDB. OpenTSDB provides an HTTP API for managing annotations.
|
||||
|
||||
### Annotation data structure
|
||||
|
||||
OpenTSDB annotations have the following fields:
|
||||
|
||||
| Field | Required | Description |
|
||||
| ------------- | -------- | ------------------------------------------------------------------------------------------ |
|
||||
| `startTime` | Yes | Unix epoch timestamp in seconds when the event started. |
|
||||
| `endTime` | No | Unix epoch timestamp in seconds when the event ended. Useful for duration-based events. |
|
||||
| `tsuid` | No | The time series UID to associate this annotation with. If empty, the annotation is global. |
|
||||
| `description` | No | Brief description of the event. This text displays in Grafana. |
|
||||
| `notes` | No | Detailed notes about the event. |
|
||||
| `custom` | No | A map of custom key-value pairs for additional metadata. |
|
||||
|
||||
### Create a global annotation
|
||||
|
||||
Use the OpenTSDB API to create a global annotation:
|
||||
|
||||
```sh
|
||||
curl -X POST http://<OPENTSDB_HOST>:4242/api/annotation \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"startTime": 1609459200,
|
||||
"description": "Production deployment v2.5.0",
|
||||
"notes": "Deployed new feature flags and performance improvements",
|
||||
"custom": {
|
||||
"version": "2.5.0",
|
||||
"environment": "production",
|
||||
"deployer": "jenkins"
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
### Create a metric-specific annotation
|
||||
|
||||
To attach an annotation to a specific time series, include the `tsuid`:
|
||||
|
||||
```sh
|
||||
curl -X POST http://<OPENTSDB_HOST>:4242/api/annotation \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"startTime": 1609459200,
|
||||
"endTime": 1609462800,
|
||||
"tsuid": "000001000001000001",
|
||||
"description": "Server maintenance",
|
||||
"notes": "Scheduled maintenance window for hardware upgrade"
|
||||
}'
|
||||
```
|
||||
|
||||
To find the TSUID for a metric, use the OpenTSDB `/api/uid/tsmeta` endpoint.
|
||||
|
||||
### Create annotations programmatically
|
||||
|
||||
Integrate annotation creation into your deployment pipelines or monitoring systems:
|
||||
|
||||
**Deployment script example:**
|
||||
|
||||
```sh
|
||||
#!/bin/bash
|
||||
VERSION=$1
|
||||
TIMESTAMP=$(date +%s)
|
||||
|
||||
curl -X POST http://opentsdb.example.com:4242/api/annotation \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{
|
||||
\"startTime\": $TIMESTAMP,
|
||||
\"description\": \"Deployed version $VERSION\",
|
||||
\"custom\": {
|
||||
\"version\": \"$VERSION\",
|
||||
\"environment\": \"production\"
|
||||
}
|
||||
}"
|
||||
```
|
||||
|
||||
For more details on the annotation API, refer to the [OpenTSDB annotation API documentation](http://opentsdb.net/docs/build/html/api_http/annotation/index.html).
|
||||
|
||||
## Troubleshoot annotation issues
|
||||
|
||||
The following section addresses common issues you may encounter when using OpenTSDB annotations.
|
||||
|
||||
### Annotations don't appear
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Time range doesn't include annotations | Expand the dashboard time range to include the annotation timestamps. |
|
||||
| Wrong metric name | Verify the metric name in your annotation query matches the metric associated with the annotations in OpenTSDB. |
|
||||
| Annotations are global but toggle is off | Enable **Show Global Annotations** if your annotations don't have a TSUID. |
|
||||
| No annotations exist | Verify annotations exist in OpenTSDB using the API: `curl http://<OPENTSDB_HOST>:4242/api/annotation?startTime=<START>&endTime=<END>` |
|
||||
|
||||
### Annotation text is empty
|
||||
|
||||
The annotation displays but has no description text.
|
||||
|
||||
**Solution:** Ensure the `description` field is populated when creating annotations in OpenTSDB. Grafana displays the `description` field as the annotation text.
|
||||
|
||||
## Next steps
|
||||
|
||||
- [Build queries](ref:query-editor) to visualize metrics alongside annotations.
|
||||
- [Use template variables](ref:template-variables) to create dynamic dashboards.
|
||||
- [Set up alerting](ref:alerting) to get notified when metrics cross thresholds.
|
||||
290
docs/sources/datasources/opentsdb/configure/index.md
Normal file
290
docs/sources/datasources/opentsdb/configure/index.md
Normal file
|
|
@ -0,0 +1,290 @@
|
|||
---
|
||||
description: Configure the OpenTSDB data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- opentsdb
|
||||
- configuration
|
||||
- provisioning
|
||||
- terraform
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Configure
|
||||
title: Configure the OpenTSDB data source
|
||||
weight: 100
|
||||
last_reviewed: 2026-01-28
|
||||
refs:
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
troubleshooting-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/troubleshooting/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/query-editor/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/template-variables/
|
||||
annotations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/annotations/
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/alerting/
|
||||
---
|
||||
|
||||
# Configure the OpenTSDB data source
|
||||
|
||||
This document explains how to configure the OpenTSDB data source in Grafana.
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before configuring the OpenTSDB data source, ensure you have:
|
||||
|
||||
- **Grafana permissions:** Organization administrator role to add data sources.
|
||||
- **OpenTSDB instance:** A running OpenTSDB server (version 2.1 or later recommended).
|
||||
- **Network access:** The Grafana server can reach the OpenTSDB HTTP API endpoint (default port 4242).
|
||||
- **Metrics in OpenTSDB:** For autocomplete to work, metrics must exist in your OpenTSDB database.
|
||||
|
||||
## Add the data source
|
||||
|
||||
To add and configure the OpenTSDB data source:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Click **Add new connection**.
|
||||
1. Type `OpenTSDB` in the search bar.
|
||||
1. Select **OpenTSDB**.
|
||||
1. Click **Add new data source** in the upper right.
|
||||
1. Configure the data source settings as described in the following sections.
|
||||
|
||||
## Configuration options
|
||||
|
||||
The following table describes the available configuration options:
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------- | ------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Name** | The data source name. This is how you refer to the data source in panels and queries. |
|
||||
| **Default** | Toggle to make this the default data source for new panels. |
|
||||
| **URL** | The HTTP protocol, IP address, and port of your OpenTSDB server. The default port is `4242`. Example: `http://localhost:4242`. |
|
||||
| **Allowed cookies** | Cookies to forward to the data source. Use this when your OpenTSDB server requires specific cookies for authentication. |
|
||||
| **Timeout** | HTTP request timeout in seconds. Increase this value for slow networks or complex queries. |
|
||||
|
||||
## Auth settings
|
||||
|
||||
Configure authentication if your OpenTSDB server requires it:
|
||||
|
||||
| Setting | Description |
|
||||
| ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Basic auth** | Enable to authenticate with a username and password. When enabled, enter the username and password in the fields that appear. |
|
||||
| **With Credentials** | Enable to send cookies or auth headers with cross-site requests. Use this when OpenTSDB is on a different domain and requires credentials. |
|
||||
| **TLS Client Authentication** | Enable to use client certificates for authentication. Requires configuring client certificate and key. |
|
||||
| **Skip TLS Verify** | Enable to skip verification of the OpenTSDB server's TLS certificate. Only use this in development environments. |
|
||||
| **Forward OAuth Identity** | Enable to forward the user's OAuth token to the data source. Useful when OpenTSDB is behind an OAuth-protected proxy. |
|
||||
| **Custom HTTP Headers** | Add custom headers to all requests sent to OpenTSDB. Useful for API keys or custom authentication schemes. |
|
||||
|
||||
## OpenTSDB settings
|
||||
|
||||
Configure these settings based on your OpenTSDB server version and configuration:
|
||||
|
||||
| Setting | Description |
|
||||
| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Version** | Select your OpenTSDB version. This affects available query features. Refer to the following section for version-specific features. |
|
||||
| **Resolution** | The resolution of your metric data. Select `second` for second-precision timestamps or `millisecond` for millisecond-precision timestamps. |
|
||||
| **Lookup limit** | Maximum number of results returned by suggest and lookup API calls. Default is `1000`. Increase this if you have many metrics or tag values. |
|
||||
|
||||
### Version-specific features
|
||||
|
||||
The version setting enables different query features in Grafana:
|
||||
|
||||
| Version | Available features |
|
||||
| --------- | -------------------------------------------------------------------------------------------------------------------- |
|
||||
| **<=2.1** | Basic queries with tags. Uses legacy tag-based filtering. |
|
||||
| **==2.2** | Adds filter support (literal_or, wildcard, regexp, and more). Filters replace tags for more flexible queries. |
|
||||
| **==2.3** | Adds explicit tags support for rate calculations and additional filter types. |
|
||||
| **==2.4** | Adds fill policy support for downsampling (none, null, zero, nan). Enables `arrays=true` for alerting compatibility. |
|
||||
|
||||
Select the version that matches your OpenTSDB server. If you're unsure, check your OpenTSDB version with the `/api/version` endpoint.
|
||||
|
||||
## Verify the connection
|
||||
|
||||
Click **Save & test** to verify that Grafana can connect to your OpenTSDB server. A successful test confirms that the URL is correct and the server is responding.
|
||||
|
||||
If the test fails, refer to [Troubleshooting](ref:troubleshooting-opentsdb) for common issues and solutions.
|
||||
|
||||
## Provision the data source
|
||||
|
||||
You can define and configure the data source in YAML files as part of the Grafana provisioning system. For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
|
||||
### YAML example
|
||||
|
||||
The following example provisions an OpenTSDB data source:
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: OpenTSDB
|
||||
type: opentsdb
|
||||
access: proxy
|
||||
url: http://localhost:4242
|
||||
jsonData:
|
||||
# OpenTSDB version: 1 = <=2.1, 2 = 2.2, 3 = 2.3, 4 = 2.4
|
||||
tsdbVersion: 3
|
||||
# Resolution: 1 = second, 2 = millisecond
|
||||
tsdbResolution: 1
|
||||
# Maximum results for suggest/lookup API calls
|
||||
lookupLimit: 1000
|
||||
```
|
||||
|
||||
### YAML example with basic authentication
|
||||
|
||||
The following example provisions an OpenTSDB data source with basic authentication:
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: OpenTSDB
|
||||
type: opentsdb
|
||||
access: proxy
|
||||
url: http://localhost:4242
|
||||
basicAuth: true
|
||||
basicAuthUser: <USERNAME>
|
||||
jsonData:
|
||||
tsdbVersion: 3
|
||||
tsdbResolution: 1
|
||||
lookupLimit: 1000
|
||||
secureJsonData:
|
||||
basicAuthPassword: <PASSWORD>
|
||||
```
|
||||
|
||||
### YAML example with custom headers
|
||||
|
||||
The following example provisions an OpenTSDB data source with custom HTTP headers:
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: OpenTSDB
|
||||
type: opentsdb
|
||||
access: proxy
|
||||
url: http://localhost:4242
|
||||
jsonData:
|
||||
tsdbVersion: 3
|
||||
tsdbResolution: 1
|
||||
lookupLimit: 1000
|
||||
httpHeaderName1: X-Custom-Header
|
||||
secureJsonData:
|
||||
httpHeaderValue1: <HEADER_VALUE>
|
||||
```
|
||||
|
||||
The following table describes the available fields:
|
||||
|
||||
| Field | Type | Description |
|
||||
| ---------------------------------- | ------- | ---------------------------------------------------------------------------- |
|
||||
| `basicAuth` | boolean | Enable basic authentication. |
|
||||
| `basicAuthUser` | string | Username for basic authentication. |
|
||||
| `jsonData.tsdbVersion` | number | OpenTSDB version: `1` (<=2.1), `2` (2.2), `3` (2.3), `4` (2.4). |
|
||||
| `jsonData.tsdbResolution` | number | Timestamp resolution: `1` (second), `2` (millisecond). |
|
||||
| `jsonData.lookupLimit` | number | Maximum results for suggest and lookup API calls. Default: `1000`. |
|
||||
| `jsonData.httpHeaderName1` | string | Name of a custom HTTP header. Use incrementing numbers for multiple headers. |
|
||||
| `secureJsonData.basicAuthPassword` | string | Password for basic authentication. |
|
||||
| `secureJsonData.httpHeaderValue1` | string | Value for the custom HTTP header. |
|
||||
|
||||
## Provision with Terraform
|
||||
|
||||
You can provision the OpenTSDB data source using [Terraform](https://www.terraform.io/) with the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
|
||||
|
||||
For more information about provisioning resources with Terraform, refer to the [Grafana as code using Terraform](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/terraform/) documentation.
|
||||
|
||||
### Terraform example
|
||||
|
||||
The following example provisions an OpenTSDB data source:
|
||||
|
||||
```hcl
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 2.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "grafana" {
|
||||
url = "<YOUR_GRAFANA_URL>"
|
||||
auth = "<YOUR_SERVICE_ACCOUNT_TOKEN>"
|
||||
}
|
||||
|
||||
resource "grafana_data_source" "opentsdb" {
|
||||
type = "opentsdb"
|
||||
name = "OpenTSDB"
|
||||
url = "http://localhost:4242"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
# OpenTSDB version: 1 = <=2.1, 2 = 2.2, 3 = 2.3, 4 = 2.4
|
||||
tsdbVersion = 3
|
||||
# Resolution: 1 = second, 2 = millisecond
|
||||
tsdbResolution = 1
|
||||
# Maximum results for suggest/lookup API calls
|
||||
lookupLimit = 1000
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
### Terraform example with basic authentication
|
||||
|
||||
The following example provisions an OpenTSDB data source with basic authentication:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "opentsdb_auth" {
|
||||
type = "opentsdb"
|
||||
name = "OpenTSDB"
|
||||
url = "http://localhost:4242"
|
||||
basic_auth_enabled = true
|
||||
basic_auth_username = "<USERNAME>"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
tsdbVersion = 3
|
||||
tsdbResolution = 1
|
||||
lookupLimit = 1000
|
||||
})
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
basicAuthPassword = "<PASSWORD>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
Replace the following placeholders:
|
||||
|
||||
- _`<YOUR_GRAFANA_URL>`_: Your Grafana instance URL (for example, `https://your-org.grafana.net` for Grafana Cloud)
|
||||
- _`<YOUR_SERVICE_ACCOUNT_TOKEN>`_: A service account token with data source permissions
|
||||
- _`<USERNAME>`_: The username for basic authentication
|
||||
- _`<PASSWORD>`_: The password for basic authentication
|
||||
|
||||
## Next steps
|
||||
|
||||
Now that you've configured OpenTSDB, you can:
|
||||
|
||||
- [Query OpenTSDB data](ref:query-editor) to build dashboards and visualizations
|
||||
- [Use template variables](ref:template-variables) to create dynamic, reusable dashboards
|
||||
- [Add annotations](ref:annotations) to overlay events on your graphs
|
||||
- [Set up alerting](ref:alerting) to get notified when metrics cross thresholds
|
||||
- [Troubleshoot issues](ref:troubleshooting-opentsdb) if you encounter problems
|
||||
403
docs/sources/datasources/opentsdb/query-editor/index.md
Normal file
403
docs/sources/datasources/opentsdb/query-editor/index.md
Normal file
|
|
@ -0,0 +1,403 @@
|
|||
---
|
||||
description: Use the OpenTSDB query editor in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- opentsdb
|
||||
- query
|
||||
- editor
|
||||
- metrics
|
||||
- filters
|
||||
- tags
|
||||
- downsampling
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Query editor
|
||||
title: OpenTSDB query editor
|
||||
weight: 200
|
||||
last_reviewed: 2026-01-28
|
||||
refs:
|
||||
troubleshooting-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/troubleshooting/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/template-variables/
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/alerting/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
---
|
||||
|
||||
# OpenTSDB query editor
|
||||
|
||||
The query editor allows you to build OpenTSDB queries visually. The available options depend on the OpenTSDB version you configured for the data source.
|
||||
|
||||
## Access the query editor
|
||||
|
||||
The OpenTSDB query editor is located on the [Explore](ref:explore) page. You can also access the OpenTSDB query editor from a dashboard panel. Click the ellipsis in the upper right of the panel and select **Edit**.
|
||||
|
||||
## Create a query
|
||||
|
||||
To create a query:
|
||||
|
||||
1. Select the **OpenTSDB** data source in a panel.
|
||||
1. Configure the query using the sections described in the following documentation.
|
||||
|
||||
## Metric section
|
||||
|
||||
The Metric section contains the core query configuration:
|
||||
|
||||
| Field | Description |
|
||||
| -------------- | ------------------------------------------------------------------------------------------------- |
|
||||
| **Metric** | The metric name to query. Start typing to see autocomplete suggestions from your OpenTSDB server. |
|
||||
| **Aggregator** | The aggregation function to combine multiple time series. Default: `sum`. |
|
||||
| **Alias** | Custom display name for the series. Use `$tag_<tagname>` to include tag values in the alias. |
|
||||
|
||||
### Alias patterns
|
||||
|
||||
The alias field supports dynamic substitution using tag values. Use the pattern `$tag_<tagname>` where `<tagname>` is the name of a tag on your metric.
|
||||
|
||||
| Pattern | Description | Example output |
|
||||
| ---------------------- | ----------------------------------- | -------------------------- |
|
||||
| `$tag_host` | Inserts the value of the `host` tag | `webserver01` |
|
||||
| `$tag_env` | Inserts the value of the `env` tag | `production` |
|
||||
| `$tag_host - CPU` | Combines tag value with static text | `webserver01 - CPU` |
|
||||
| `$tag_host ($tag_env)` | Multiple tag substitutions | `webserver01 (production)` |
|
||||
|
||||
## Downsample section
|
||||
|
||||
Downsampling reduces the number of data points returned by aggregating values over time intervals. This improves query performance and reduces the amount of data transferred.
|
||||
|
||||
| Field | Description |
|
||||
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Interval** | The time interval for downsampling. Leave blank to use the automatic interval based on the panel's time range and width. |
|
||||
| **Aggregator** | The aggregation function for downsampling. Default: `avg`. |
|
||||
| **Fill** | (Version 2.2+) The fill policy for missing data points. Default: `none`. |
|
||||
| **Disable downsampling** | Toggle to disable downsampling entirely. Use this when you need raw data points. |
|
||||
|
||||
### Interval format
|
||||
|
||||
The interval field accepts time duration strings:
|
||||
|
||||
| Format | Description | Example |
|
||||
| ------ | ----------- | ------- |
|
||||
| `s` | Seconds | `30s` |
|
||||
| `m` | Minutes | `5m` |
|
||||
| `h` | Hours | `1h` |
|
||||
| `d` | Days | `1d` |
|
||||
| `w` | Weeks | `1w` |
|
||||
|
||||
When the interval is left blank, Grafana automatically calculates an appropriate interval based on the panel's time range and pixel width. This ensures optimal data density for visualization.
|
||||
|
||||
## Filters section
|
||||
|
||||
Filters (available in OpenTSDB 2.2+) provide advanced filtering capabilities that replace the legacy tag-based filtering.
|
||||
|
||||
| Field | Description |
|
||||
| ------------ | --------------------------------------------------------------------------------------------------------------- |
|
||||
| **Key** | The tag key to filter on. Select from autocomplete suggestions or type a custom value. |
|
||||
| **Type** | The filter type. Determines how the filter value is matched. Default: `iliteral_or`. |
|
||||
| **Filter** | The filter value or pattern. Supports autocomplete for tag values. |
|
||||
| **Group by** | Toggle to group results by this tag key. When enabled, separate time series are returned for each unique value. |
|
||||
|
||||
### Add, edit, and remove filters
|
||||
|
||||
To manage filters:
|
||||
|
||||
1. Click the **+** button next to "Filters" to add a new filter.
|
||||
1. Configure the filter fields (Key, Type, Filter, Group by).
|
||||
1. Click **add filter** to apply the filter.
|
||||
1. To edit an existing filter, click the **pencil** icon next to it.
|
||||
1. To remove a filter, click the **x** icon next to it.
|
||||
|
||||
You can add multiple filters to a single query. All filters are combined with AND logic.
|
||||
|
||||
### Filter types
|
||||
|
||||
| Type | Description | Example |
|
||||
| ----------------- | ---------------------------------------------------------- | --------------------- |
|
||||
| `literal_or` | Matches exact values. Use `\|` to specify multiple values. | `web01\|web02\|web03` |
|
||||
| `iliteral_or` | Case-insensitive literal match. | `WEB01\|web02` |
|
||||
| `wildcard` | Matches using `*` as a wildcard character. | `web-*-prod` |
|
||||
| `iwildcard` | Case-insensitive wildcard match. | `WEB-*` |
|
||||
| `regexp` | Matches using regular expressions. | `web-[0-9]+` |
|
||||
| `not_literal_or` | Excludes exact values. | `web01\|web02` |
|
||||
| `not_iliteral_or` | Case-insensitive exclusion. | `TEST\|DEV` |
|
||||
|
||||
### Group by behavior
|
||||
|
||||
When **Group by** is enabled for a filter:
|
||||
|
||||
- Results are split into separate time series for each unique value of the filtered tag.
|
||||
- Each time series is labeled with its tag value.
|
||||
- This is useful for comparing values across hosts, environments, or other dimensions.
|
||||
|
||||
When **Group by** is disabled:
|
||||
|
||||
- All matching time series are combined using the selected aggregator.
|
||||
- A single aggregated time series is returned.
|
||||
|
||||
## Tags section
|
||||
|
||||
Tags filter metrics by key-value pairs. This is the legacy filtering method for OpenTSDB versions prior to 2.2.
|
||||
|
||||
| Field | Description |
|
||||
| --------- | ----------------------------------------------------------------- |
|
||||
| **Key** | The tag key to filter on. Select from autocomplete suggestions. |
|
||||
| **Value** | The tag value to match. Use `*` to match all values for this key. |
|
||||
|
||||
### Add, edit, and remove tags
|
||||
|
||||
To manage tags:
|
||||
|
||||
1. Click the **+** button next to "Tags" to add a new tag.
|
||||
1. Select or type a tag key.
|
||||
1. Select or type a tag value (use `*` for wildcard).
|
||||
1. Click **add tag** to apply the tag filter.
|
||||
1. To edit an existing tag, click the **pencil** icon next to it.
|
||||
1. To remove a tag, click the **x** icon next to it.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Tags are deprecated in OpenTSDB 2.2 and later. Use Filters instead for more powerful filtering options including wildcards, regular expressions, and exclusion patterns.
|
||||
{{< /admonition >}}
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Tags and Filters are mutually exclusive. If you have filters defined, you cannot add tags, and vice versa. The query editor displays a warning if you attempt to use both.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Rate section
|
||||
|
||||
The Rate section computes the rate of change, which is essential for counter metrics that continuously increment.
|
||||
|
||||
| Field | Description |
|
||||
| ----------------- | ---------------------------------------------------------------------------------------------------------- |
|
||||
| **Rate** | Toggle to enable rate calculation. Computes the per-second rate of change between consecutive values. |
|
||||
| **Counter** | (When Rate is enabled) Toggle to indicate the metric is a monotonically increasing counter that may reset. |
|
||||
| **Counter max** | (When Counter is enabled) The maximum value before the counter wraps around. |
|
||||
| **Reset value** | (When Counter is enabled) The value the counter resets to after wrapping. Default: `0`. |
|
||||
| **Explicit tags** | (Version 2.3+) Toggle to require all specified tags to exist on matching time series. |
|
||||
|
||||
### When to use rate calculation
|
||||
|
||||
Enable **Rate** when your metric is a continuously increasing counter, such as:
|
||||
|
||||
- Network bytes sent/received
|
||||
- Request counts
|
||||
- Error counts
|
||||
- Disk I/O operations
|
||||
|
||||
The rate calculation converts cumulative values into per-second rates, making the data more meaningful for visualization.
|
||||
|
||||
### Counter settings
|
||||
|
||||
Enable **Counter** when your metric can reset to zero (for example, after a service restart). The counter settings help OpenTSDB calculate correct rates across resets:
|
||||
|
||||
- **Counter max**: Set this to the maximum value your counter can reach before wrapping. For 64-bit counters, use `18446744073709551615`. For 32-bit counters, use `4294967295`.
|
||||
- **Reset value**: The value the counter resets to, typically `0`.
|
||||
|
||||
### Explicit tags
|
||||
|
||||
When **Explicit tags** is enabled (version 2.3+), OpenTSDB only returns time series that have all the tags specified in your query. This prevents unexpected results when some time series are missing tags that others have.
|
||||
|
||||
## Aggregators
|
||||
|
||||
The aggregator function combines multiple time series into one. Grafana fetches the list of available aggregators from your OpenTSDB server, so you may see additional aggregators beyond those listed here.
|
||||
|
||||
### Common aggregators
|
||||
|
||||
| Aggregator | Description | Use case |
|
||||
| ---------- | ----------------------------------------- | -------------------------------------- |
|
||||
| `sum` | Sum all values at each timestamp. | Total requests across all servers. |
|
||||
| `avg` | Average all values at each timestamp. | Average CPU usage across hosts. |
|
||||
| `min` | Take the minimum value at each timestamp. | Lowest response time. |
|
||||
| `max` | Take the maximum value at each timestamp. | Peak memory usage. |
|
||||
| `dev` | Calculate the standard deviation. | Measure variability in response times. |
|
||||
| `count` | Count the number of data points. | Number of reporting hosts. |
|
||||
|
||||
### Interpolation aggregators
|
||||
|
||||
These aggregators handle missing data points differently:
|
||||
|
||||
| Aggregator | Description |
|
||||
| ---------- | ---------------------------------------------------- |
|
||||
| `zimsum` | Sum values, treating missing data as zero. |
|
||||
| `mimmin` | Minimum value, ignoring missing (interpolated) data. |
|
||||
| `mimmax` | Maximum value, ignoring missing (interpolated) data. |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The available aggregators depend on your OpenTSDB server version and configuration. The aggregator dropdown is populated dynamically from the `/api/aggregators` endpoint on your OpenTSDB server.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Fill policies
|
||||
|
||||
Fill policies (available in OpenTSDB 2.2+) determine how to handle missing data points during downsampling. This is important when your data has gaps or irregular collection intervals.
|
||||
|
||||
| Policy | Description | Use case |
|
||||
| ------ | --------------------------------------------------- | ----------------------------------------------------------- |
|
||||
| `none` | Don't fill missing values. Gaps remain in the data. | Default behavior; preserves data fidelity. |
|
||||
| `nan` | Fill missing values with NaN (Not a Number). | Useful for calculations that should propagate missing data. |
|
||||
| `null` | Fill missing values with null. | Visualizations show gaps at null points. |
|
||||
| `zero` | Fill missing values with zero. | Treat missing data as zero values; useful for counters. |
|
||||
|
||||
### Choose the right fill policy
|
||||
|
||||
- Use `none` (default) when you want to see actual data gaps in your visualizations.
|
||||
- Use `null` when you want graphs to show breaks at missing data points.
|
||||
- Use `zero` when missing data should be interpreted as zero (for example, no requests during a period).
|
||||
- Use `nan` when you need missing values to propagate through calculations.
|
||||
|
||||
## Autocomplete suggestions
|
||||
|
||||
The query editor provides autocomplete suggestions to help you build queries quickly and accurately.
|
||||
|
||||
### What autocomplete provides
|
||||
|
||||
| Field | Source | Description |
|
||||
| --------------- | --------------------------- | ----------------------------------------------- |
|
||||
| **Metric** | `/api/suggest?type=metrics` | Suggests metric names as you type. |
|
||||
| **Tag keys** | Previous query results | Suggests tag keys based on the selected metric. |
|
||||
| **Tag values** | `/api/suggest?type=tagv` | Suggests tag values as you type. |
|
||||
| **Filter keys** | Previous query results | Suggests tag keys for filter configuration. |
|
||||
|
||||
### Autocomplete requirements
|
||||
|
||||
For autocomplete to work:
|
||||
|
||||
- The OpenTSDB suggest API must be enabled on your server.
|
||||
- Metrics must exist in your OpenTSDB database.
|
||||
- The **Lookup limit** setting in your data source configuration controls the maximum number of suggestions returned.
|
||||
|
||||
If autocomplete isn't working, refer to [Troubleshooting](ref:troubleshooting-opentsdb).
|
||||
|
||||
## Use template variables
|
||||
|
||||
You can use template variables in any text field in the query editor. Template variables are replaced with their current values when the query executes.
|
||||
|
||||
Common uses include:
|
||||
|
||||
- **Metric field**: `$metric` to dynamically select metrics.
|
||||
- **Filter values**: `$host` to filter by a variable-selected host.
|
||||
- **Tag values**: `$environment` to filter by environment.
|
||||
|
||||
For more information about creating and using template variables, refer to [Template variables](ref:template-variables).
|
||||
|
||||
## Query examples
|
||||
|
||||
The following examples demonstrate common query patterns.
|
||||
|
||||
### Basic metric query with tag filtering
|
||||
|
||||
| Field | Value |
|
||||
| ---------- | ------------------ |
|
||||
| Metric | `sys.cpu.user` |
|
||||
| Aggregator | `avg` |
|
||||
| Tags | `host=webserver01` |
|
||||
|
||||
This query returns the average CPU usage for the host `webserver01`.
|
||||
|
||||
### Query with wildcard filter (OpenTSDB 2.2+)
|
||||
|
||||
| Field | Value |
|
||||
| ------------ | --------------------- |
|
||||
| Metric | `http.requests.count` |
|
||||
| Aggregator | `sum` |
|
||||
| Filter Key | `host` |
|
||||
| Filter Type | `wildcard` |
|
||||
| Filter Value | `web-*` |
|
||||
| Group by | enabled |
|
||||
|
||||
This query sums HTTP request counts across all hosts matching `web-*` and groups results by host.
|
||||
|
||||
### Rate calculation for network counters
|
||||
|
||||
| Field | Value |
|
||||
| ----------- | ---------------------- |
|
||||
| Metric | `net.bytes.received` |
|
||||
| Aggregator | `sum` |
|
||||
| Rate | enabled |
|
||||
| Counter | enabled |
|
||||
| Counter max | `18446744073709551615` |
|
||||
|
||||
This query calculates the rate of bytes received per second. The counter max is set to the 64-bit unsigned integer maximum to handle counter wraps correctly.
|
||||
|
||||
### Using alias patterns
|
||||
|
||||
| Field | Value |
|
||||
| ---------- | --------------------------- |
|
||||
| Metric | `app.response.time` |
|
||||
| Aggregator | `avg` |
|
||||
| Tags | `host=*`, `env=production` |
|
||||
| Alias | `$tag_host - Response Time` |
|
||||
|
||||
This query uses the alias pattern to create readable legend labels like `webserver01 - Response Time`.
|
||||
|
||||
### Downsampling with custom interval
|
||||
|
||||
| Field | Value |
|
||||
| --------------------- | ------------------- |
|
||||
| Metric | `sys.disk.io.bytes` |
|
||||
| Aggregator | `sum` |
|
||||
| Downsample Interval | `5m` |
|
||||
| Downsample Aggregator | `avg` |
|
||||
| Fill | `zero` |
|
||||
|
||||
This query downsamples disk I/O data to 5-minute averages, filling gaps with zero values.
|
||||
|
||||
### Compare environments with filters
|
||||
|
||||
| Field | Value |
|
||||
| ------------ | --------------------- |
|
||||
| Metric | `app.errors.count` |
|
||||
| Aggregator | `sum` |
|
||||
| Filter Key | `env` |
|
||||
| Filter Type | `literal_or` |
|
||||
| Filter Value | `staging\|production` |
|
||||
| Group by | enabled |
|
||||
|
||||
This query shows error counts for both staging and production environments as separate time series for comparison.
|
||||
|
||||
### Exclude specific hosts
|
||||
|
||||
| Field | Value |
|
||||
| ------------ | ------------------------- |
|
||||
| Metric | `sys.cpu.user` |
|
||||
| Aggregator | `avg` |
|
||||
| Filter Key | `host` |
|
||||
| Filter Type | `not_literal_or` |
|
||||
| Filter Value | `test-server\|dev-server` |
|
||||
| Group by | enabled |
|
||||
|
||||
This query shows CPU usage for all hosts except test-server and dev-server.
|
||||
|
||||
### Query with explicit tags (version 2.3+)
|
||||
|
||||
| Field | Value |
|
||||
| ------------- | --------------------- |
|
||||
| Metric | `app.request.latency` |
|
||||
| Aggregator | `avg` |
|
||||
| Filter Key | `host` |
|
||||
| Filter Type | `wildcard` |
|
||||
| Filter Value | `*` |
|
||||
| Group by | enabled |
|
||||
| Explicit tags | enabled |
|
||||
|
||||
This query only returns time series that have the `host` tag defined, excluding any time series that are missing this tag.
|
||||
|
||||
## Next steps
|
||||
|
||||
- [Use template variables](ref:template-variables) to create dynamic, reusable dashboards.
|
||||
- [Set up alerting](ref:alerting) to get notified when metrics cross thresholds.
|
||||
- [Troubleshoot issues](ref:troubleshooting-opentsdb) if you encounter problems with queries.
|
||||
251
docs/sources/datasources/opentsdb/template-variables/index.md
Normal file
251
docs/sources/datasources/opentsdb/template-variables/index.md
Normal file
|
|
@@ -0,0 +1,251 @@
|
|||
---
|
||||
description: Use template variables with the OpenTSDB data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- opentsdb
|
||||
- template
|
||||
- variables
|
||||
- dashboard
|
||||
- dynamic
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Template variables
|
||||
title: OpenTSDB template variables
|
||||
weight: 300
|
||||
last_reviewed: 2026-01-28
|
||||
refs:
|
||||
variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
troubleshooting-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/troubleshooting/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/query-editor/
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/alerting/
|
||||
---
|
||||
|
||||
# OpenTSDB template variables
|
||||
|
||||
Instead of hard-coding server, application, and sensor names in your metric queries, you can use template variables. Variables appear as drop-down menus at the top of the dashboard, making it easy to change the data being displayed without editing queries.
|
||||
|
||||
For an introduction to template variables, refer to the [Variables](ref:variables) documentation.
|
||||
|
||||
## Query variable
|
||||
|
||||
The OpenTSDB data source supports query-type template variables that fetch values directly from OpenTSDB. These variables dynamically populate based on data in your OpenTSDB database.
|
||||
|
||||
### Supported query functions
|
||||
|
||||
| Query | Description | API used |
|
||||
| ---------------------------- | -------------------------------------------------------------------------------- | --------------------------- |
|
||||
| `metrics(prefix)` | Returns metric names matching the prefix. Use empty parentheses for all metrics. | `/api/suggest?type=metrics` |
|
||||
| `tag_names(metric)` | Returns tag keys (names) that exist for a specific metric. | `/api/search/lookup` |
|
||||
| `tag_values(metric, tagkey)` | Returns tag values for a specific metric and tag key. | `/api/search/lookup` |
|
||||
| `suggest_tagk(prefix)` | Returns tag keys matching the prefix across all metrics. | `/api/suggest?type=tagk` |
|
||||
| `suggest_tagv(prefix)` | Returns tag values matching the prefix across all metrics. | `/api/suggest?type=tagv` |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The `tag_names` and `tag_values` functions use the OpenTSDB lookup API, which requires metrics to exist in your database. The `suggest_tagk` and `suggest_tagv` functions use the suggest API, which searches across all metrics.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Create a query variable
|
||||
|
||||
To create a query variable:
|
||||
|
||||
1. Navigate to **Dashboard settings** > **Variables**.
|
||||
1. Click **Add variable**.
|
||||
1. Enter a **Name** for your variable (for example, `host`).
|
||||
1. Select **Query** as the variable type.
|
||||
1. Select the **OpenTSDB** data source.
|
||||
1. Enter your query using one of the supported query functions.
|
||||
1. Optionally configure **Multi-value** to allow selecting multiple values.
|
||||
1. Optionally configure **Include All option** to add an "All" option.
|
||||
1. Click **Apply**.
|
||||
|
||||
### Query variable examples
|
||||
|
||||
**List all metrics:**
|
||||
|
||||
```
|
||||
metrics()
|
||||
```
|
||||
|
||||
Returns all metric names in your OpenTSDB database. Useful for creating a metric selector.
|
||||
|
||||
**List metrics with a prefix:**
|
||||
|
||||
```
|
||||
metrics(sys.cpu)
|
||||
```
|
||||
|
||||
Returns metrics starting with `sys.cpu`, such as `sys.cpu.user`, `sys.cpu.system`, `sys.cpu.idle`.
|
||||
|
||||
**List tag keys for a metric:**
|
||||
|
||||
```
|
||||
tag_names(sys.cpu.user)
|
||||
```
|
||||
|
||||
Returns tag keys like `host`, `env`, `datacenter` that exist on the `sys.cpu.user` metric.
|
||||
|
||||
**List tag values for a metric and tag key:**
|
||||
|
||||
```
|
||||
tag_values(sys.cpu.user, host)
|
||||
```
|
||||
|
||||
Returns all host values for the `sys.cpu.user` metric, such as `webserver01`, `webserver02`, `dbserver01`.
|
||||
|
||||
**Search for tag keys by prefix:**
|
||||
|
||||
```
|
||||
suggest_tagk(host)
|
||||
```
|
||||
|
||||
Returns tag keys matching `host` across all metrics, such as `host`, `hostname`, `host_id`.
|
||||
|
||||
**Search for tag values by prefix:**
|
||||
|
||||
```
|
||||
suggest_tagv(web)
|
||||
```
|
||||
|
||||
Returns tag values matching `web` across all metrics, such as `webserver01`, `webserver02`, `web-prod-01`.
|
||||
|
||||
If template variables aren't populating in the **Preview of values** section, refer to [Troubleshooting](ref:troubleshooting-opentsdb).
|
||||
|
||||
## Nested template variables
|
||||
|
||||
You can use one template variable to filter values for another. This creates cascading filters, such as selecting a data center first, then showing only hosts in that data center.
|
||||
|
||||
### Filter syntax
|
||||
|
||||
The `tag_values` function accepts additional tag filters after the tag key:
|
||||
|
||||
```
|
||||
tag_values(metric, tagkey, tag1=value1, tag2=value2, ...)
|
||||
```
|
||||
|
||||
Use template variables as filter values to create dynamic dependencies:
|
||||
|
||||
```
|
||||
tag_values(metric, tagkey, tag1=$variable1, tag2=$variable2)
|
||||
```
|
||||
|
||||
### Nested variable examples
|
||||
|
||||
| Query | Description |
|
||||
| ---------------------------------------------------------- | ------------------------------------------------------------ |
|
||||
| `tag_values(sys.cpu.user, host, env=$env)` | Returns host values filtered by the selected `env` value. |
|
||||
| `tag_values(sys.cpu.user, host, env=$env, datacenter=$dc)` | Returns host values filtered by both `env` and `datacenter`. |
|
||||
| `tag_values(app.requests, endpoint, service=$service)` | Returns endpoint values for the selected service. |
|
||||
|
||||
### Create cascading filters
|
||||
|
||||
To create a hierarchy of dependent variables:
|
||||
|
||||
1. **Create the parent variable:**
|
||||
- Name: `datacenter`
|
||||
- Query: `tag_values(sys.cpu.user, datacenter)`
|
||||
|
||||
2. **Create the child variable:**
|
||||
- Name: `host`
|
||||
- Query: `tag_values(sys.cpu.user, host, datacenter=$datacenter)`
|
||||
|
||||
3. **Create additional levels as needed:**
|
||||
- Name: `cpu`
|
||||
- Query: `tag_values(sys.cpu.user, cpu, datacenter=$datacenter, host=$host)`
|
||||
|
||||
When users change the data center selection, the host variable automatically refreshes to show only hosts in that data center.
|
||||
|
||||
## Use variables in queries
|
||||
|
||||
Reference variables in your queries using the `$variablename` or `${variablename}` syntax. Grafana replaces the variable with its current value when the query executes.
|
||||
|
||||
### Where to use variables
|
||||
|
||||
Variables can be used in these query editor fields:
|
||||
|
||||
| Field | Example | Description |
|
||||
| ----------------------- | ------------------- | ------------------------------------------- |
|
||||
| **Metric** | `$metric` | Dynamically select which metric to query. |
|
||||
| **Tag value** | `host=$host` | Filter by a variable-selected tag value. |
|
||||
| **Filter value** | `$host` | Use in filter value field for filtering. |
|
||||
| **Alias** | `$tag_host - $host` | Include variable values in legend labels. |
|
||||
| **Downsample interval** | `$interval` | Use a variable for the downsample interval. |
|
||||
|
||||
### Variable syntax options
|
||||
|
||||
| Syntax | Description |
|
||||
| ------------------------ | -------------------------------------------------------------------------------- |
|
||||
| `$variablename` | Simple syntax for most cases. |
|
||||
| `${variablename}` | Use when the variable is adjacent to other text (for example, `${host}_suffix`). |
|
||||
| `${variablename:format}` | Apply a specific format to the variable value. |
|
||||
|
||||
## Multi-value variables
|
||||
|
||||
When you enable **Multi-value** for a variable, users can select multiple values simultaneously. The OpenTSDB data source handles multi-value variables using pipe (`|`) separation, which is compatible with OpenTSDB's literal_or filter type.
|
||||
|
||||
### Configure multi-value variables
|
||||
|
||||
1. When creating the variable, enable **Multi-value**.
|
||||
1. Optionally enable **Include All option** to add an "All" selection.
|
||||
1. Use the variable in a filter with the `literal_or` filter type.
|
||||
|
||||
### Multi-value example
|
||||
|
||||
With a `host` variable configured as multi-value:
|
||||
|
||||
| Field | Value |
|
||||
| ------------ | ------------ |
|
||||
| Filter Key | `host` |
|
||||
| Filter Type | `literal_or` |
|
||||
| Filter Value | `$host` |
|
||||
|
||||
If the user selects `webserver01`, `webserver02`, and `webserver03`, the filter value becomes `webserver01|webserver02|webserver03`.
|
||||
|
||||
### All value behavior
|
||||
|
||||
When the user selects "All", Grafana sends all available values pipe-separated. For large value sets, consider using a wildcard filter instead:
|
||||
|
||||
| Field | Value |
|
||||
| ------------ | ---------- |
|
||||
| Filter Key | `host` |
|
||||
| Filter Type | `wildcard` |
|
||||
| Filter Value | `*` |
|
||||
|
||||
## Interval and auto-interval variables
|
||||
|
||||
Grafana provides built-in interval variables that are useful with OpenTSDB downsampling:
|
||||
|
||||
| Variable | Description |
|
||||
| ---------------- | ---------------------------------------------------------------------- |
|
||||
| `$__interval` | Automatically calculated interval based on time range and panel width. |
|
||||
| `$__interval_ms` | Same as `$__interval` but in milliseconds. |
|
||||
|
||||
Use these in the downsample interval field for automatic interval adjustment:
|
||||
|
||||
| Field | Value |
|
||||
| ------------------- | ------------- |
|
||||
| Downsample Interval | `$__interval` |
|
||||
|
||||
## Next steps
|
||||
|
||||
- [Build queries](ref:query-editor) using your template variables.
|
||||
- [Set up alerting](ref:alerting) with templated queries.
|
||||
- [Troubleshoot issues](ref:troubleshooting-opentsdb) if variables aren't populating.
|
||||
204
docs/sources/datasources/opentsdb/troubleshooting/index.md
Normal file
204
docs/sources/datasources/opentsdb/troubleshooting/index.md
Normal file
|
|
@@ -0,0 +1,204 @@
|
|||
---
|
||||
description: Troubleshoot OpenTSDB data source issues in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- opentsdb
|
||||
- troubleshooting
|
||||
- errors
|
||||
- connection
|
||||
- query
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot OpenTSDB data source issues
|
||||
weight: 500
|
||||
last_reviewed: 2026-01-28
|
||||
refs:
|
||||
configure-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/configure/
|
||||
template-variables-opentsdb:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/opentsdb/template-variables/
|
||||
---
|
||||
|
||||
# Troubleshoot OpenTSDB data source issues
|
||||
|
||||
This document provides solutions to common issues you may encounter when configuring or using the OpenTSDB data source. For configuration instructions, refer to [Configure the OpenTSDB data source](ref:configure-opentsdb).
|
||||
|
||||
## Connection errors
|
||||
|
||||
These errors occur when Grafana can't connect to the OpenTSDB server.
|
||||
|
||||
### "Connection refused" or timeout errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Save & test fails
|
||||
- Queries return connection errors
|
||||
- Intermittent timeouts
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| ---------------------------- | ----------------------------------------------------------------------------------------------------------------- |
|
||||
| Wrong URL or port | Verify the URL includes the correct protocol, IP address, and port. The default port is `4242`. |
|
||||
| OpenTSDB not running | Check that the OpenTSDB server is running and accessible. |
|
||||
| Firewall blocking connection | Ensure firewall rules allow outbound connections from Grafana to the OpenTSDB server on the configured port. |
|
||||
| Network issues | Verify network connectivity between Grafana and OpenTSDB. Try pinging the server or using `curl` to test the API. |
|
||||
|
||||
To test connectivity manually, run:
|
||||
|
||||
```sh
|
||||
curl http://<OPENTSDB_HOST>:4242/api/version
|
||||
```
|
||||
|
||||
## Authentication errors
|
||||
|
||||
These errors occur when credentials are invalid or misconfigured.
|
||||
|
||||
### "401 Unauthorized" or "403 Forbidden"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Save & test fails with authentication error
|
||||
- Queries return authorization errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify that basic authentication credentials are correct in the data source configuration.
|
||||
1. Check that the OpenTSDB server is configured to accept the provided credentials.
|
||||
1. If using cookies for authentication, ensure the required cookies are listed in **Allowed cookies**.
|
||||
|
||||
## Query errors
|
||||
|
||||
These errors occur when executing queries against OpenTSDB.
|
||||
|
||||
### No data returned
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query executes without error but returns no data
|
||||
- Panels show "No data" message
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| ------------------------------- | ------------------------------------------------------------------------------------------------------- |
|
||||
| Time range doesn't contain data | Expand the dashboard time range. Verify data exists in OpenTSDB for the selected period. |
|
||||
| Wrong metric name | Verify the metric name is correct. Use autocomplete to discover available metrics. |
|
||||
| Incorrect tag filters | Remove or adjust tag filters. Use `*` as a wildcard to match all values. |
|
||||
| Version mismatch | Ensure the configured OpenTSDB version matches your server. Filters are only available in version 2.2+. |
|
||||
| Using both Filters and Tags | Use either Filters or Tags, not both. They're mutually exclusive in OpenTSDB 2.2+. |
|
||||
|
||||
### Query timeout
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Queries take a long time and then fail
|
||||
- Error message mentions timeout
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add more specific tag filters to reduce the data volume.
|
||||
1. Increase the **Timeout** setting in the data source configuration.
|
||||
1. Enable downsampling to reduce the number of data points returned.
|
||||
1. Check OpenTSDB server performance and HBase health.
|
||||
|
||||
## Autocomplete doesn't work
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- No suggestions appear when typing metric names, tag names, or tag values
|
||||
- Drop-down menus are empty
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify that the OpenTSDB `/api/suggest` endpoint is accessible. Test it manually with `curl http://<OPENTSDB_HOST>:4242/api/suggest?type=metrics`.
|
||||
1. Increase the **Lookup limit** setting if you have many metrics or tags.
|
||||
1. Verify that the data source connection is working by clicking **Save & test**.
|
||||
1. Check that metrics exist in OpenTSDB. The suggest API only returns metrics that have been written to the database.
|
||||
|
||||
## Template variables don't populate
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Template variable drop-down menus are empty
|
||||
- **Preview of values** shows no results
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Enable real-time metadata tracking in OpenTSDB by setting `tsd.core.meta.enable_realtime_ts` to `true` in your OpenTSDB configuration.
|
||||
1. Sync existing metadata by running `tsdb uid metasync` on the OpenTSDB server.
|
||||
1. Verify the variable query syntax is correct. Refer to [Template variables](ref:template-variables-opentsdb) for the correct syntax.
|
||||
1. Check that the data source connection is working.
|
||||
|
||||
## Performance issues
|
||||
|
||||
These issues relate to slow queries or high resource usage.
|
||||
|
||||
### Slow queries
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Dashboards take a long time to load
|
||||
- Queries are slow even for small time ranges
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Enable downsampling in the query editor to reduce data volume.
|
||||
1. Use more specific tag filters to limit the time series returned.
|
||||
1. Reduce the time range.
|
||||
1. Check OpenTSDB and HBase performance metrics.
|
||||
1. Consider increasing OpenTSDB heap size if memory is constrained.
|
||||
|
||||
### HBase performance issues
|
||||
|
||||
OpenTSDB relies on HBase for data storage. Performance problems in HBase directly affect OpenTSDB query performance.
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Monitor HBase region server health and compaction status.
|
||||
1. Ensure sufficient heap memory is allocated to HBase region servers.
|
||||
1. Check for region hotspots and rebalance if necessary.
|
||||
1. Refer to the [OpenTSDB troubleshooting guide](http://opentsdb.net/docs/build/html/user_guide/troubleshooting.html) for HBase-specific issues.
|
||||
|
||||
## Enable debug logging
|
||||
|
||||
To capture detailed error information for troubleshooting:
|
||||
|
||||
1. Set the Grafana log level to `debug` in the configuration file:
|
||||
|
||||
```ini
|
||||
[log]
|
||||
level = debug
|
||||
```
|
||||
|
||||
1. Review logs in `/var/log/grafana/grafana.log` (or your configured log location).
|
||||
1. Look for OpenTSDB-specific entries that include request and response details.
|
||||
1. Reset the log level to `info` after troubleshooting to avoid excessive log volume.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you've tried the solutions in this document and still encounter issues:
|
||||
|
||||
1. Check the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Review [OpenTSDB issues on GitHub](https://github.com/grafana/grafana/issues?q=opentsdb) for known bugs.
|
||||
1. Consult the [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html) for server-specific guidance.
|
||||
1. Contact Grafana Support if you're a Grafana Enterprise, Cloud Pro, or Cloud Contracted user.
|
||||
|
||||
When reporting issues, include:
|
||||
|
||||
- Grafana version
|
||||
- OpenTSDB version
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Data source configuration (redact credentials)
|
||||
2
go.mod
2
go.mod
|
|
@ -88,7 +88,7 @@ require (
|
|||
github.com/googleapis/gax-go/v2 v2.15.0 // @grafana/grafana-backend-group
|
||||
github.com/gorilla/mux v1.8.1 // @grafana/grafana-backend-group
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // @grafana/grafana-app-platform-squad
|
||||
github.com/grafana/alerting v0.0.0-20260129164026-85d7010c64b8 // @grafana/alerting-backend
|
||||
github.com/grafana/alerting v0.0.0-20260203165836-8b17916e8173 // @grafana/alerting-backend
|
||||
github.com/grafana/authlib v0.0.0-20260203153107-16a114a99f67 // @grafana/identity-access-team
|
||||
github.com/grafana/authlib/types v0.0.0-20260203131350-b83e80394acc // @grafana/identity-access-team
|
||||
github.com/grafana/dataplane/examples v0.0.1 // @grafana/observability-metrics
|
||||
|
|
|
|||
4
go.sum
4
go.sum
|
|
@ -1600,8 +1600,8 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
|
|||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grafana/alerting v0.0.0-20260129164026-85d7010c64b8 h1:964kdD/6Xyzr4g910nZnMtj0z16ijsvpA8Ju4sFOLjA=
|
||||
github.com/grafana/alerting v0.0.0-20260129164026-85d7010c64b8/go.mod h1:Ji0SfJChcwjgq8ljy6Y5CcYfHfAYKXjKYeysOoDS/6s=
|
||||
github.com/grafana/alerting v0.0.0-20260203165836-8b17916e8173 h1:nrQnGVRvBQK1zmg9rB6TA6tOeS0sSsUUV9JS1erkw2Q=
|
||||
github.com/grafana/alerting v0.0.0-20260203165836-8b17916e8173/go.mod h1:Ji0SfJChcwjgq8ljy6Y5CcYfHfAYKXjKYeysOoDS/6s=
|
||||
github.com/grafana/authlib v0.0.0-20260203153107-16a114a99f67 h1:4t3595k0Ef94NOlg4Br785+cTgAKa4rqeo9lMHbV1fs=
|
||||
github.com/grafana/authlib v0.0.0-20260203153107-16a114a99f67/go.mod h1:za8MGa5J9Bbgm2XorXc+FbGe72ln46OpN5o8P1uX9Og=
|
||||
github.com/grafana/authlib/types v0.0.0-20260203131350-b83e80394acc h1:wagsf4me4j/UFNocyMJHz5/803XpnfGJtNj8/YWy0j0=
|
||||
|
|
|
|||
|
|
@ -1287,6 +1287,11 @@ export interface FeatureToggles {
|
|||
*/
|
||||
newVizSuggestions?: boolean;
|
||||
/**
|
||||
* Enable style actions (copy/paste) in the panel editor
|
||||
* @default false
|
||||
*/
|
||||
panelStyleActions?: boolean;
|
||||
/**
|
||||
* Enable all plugins to supply visualization suggestions (including 3rd party plugins)
|
||||
* @default false
|
||||
*/
|
||||
|
|
|
|||
49
packages/grafana-schema/src/common/common.gen.ts
generated
49
packages/grafana-schema/src/common/common.gen.ts
generated
|
|
@ -1001,6 +1001,55 @@ export const defaultTableFooterOptions: Partial<TableFooterOptions> = {
|
|||
reducers: [],
|
||||
};
|
||||
|
||||
export interface TableOptions {
|
||||
/**
|
||||
* Controls the height of the rows
|
||||
*/
|
||||
cellHeight?: TableCellHeight;
|
||||
/**
|
||||
* If true, disables all keyboard events in the table. this is used when previewing a table (i.e. suggestions)
|
||||
*/
|
||||
disableKeyboardEvents?: boolean;
|
||||
/**
|
||||
* Enable pagination on the table
|
||||
*/
|
||||
enablePagination?: boolean;
|
||||
/**
|
||||
* Represents the index of the selected frame
|
||||
*/
|
||||
frameIndex: number;
|
||||
/**
|
||||
* Defines the number of columns to freeze on the left side of the table
|
||||
*/
|
||||
frozenColumns?: {
|
||||
left?: number;
|
||||
};
|
||||
/**
|
||||
* limits the maximum height of a row, if text wrapping or dynamic height is enabled
|
||||
*/
|
||||
maxRowHeight?: number;
|
||||
/**
|
||||
* Controls whether the panel should show the header
|
||||
*/
|
||||
showHeader: boolean;
|
||||
/**
|
||||
* Controls whether the header should show icons for the column types
|
||||
*/
|
||||
showTypeIcons?: boolean;
|
||||
/**
|
||||
* Used to control row sorting
|
||||
*/
|
||||
sortBy?: Array<TableSortByFieldState>;
|
||||
}
|
||||
|
||||
export const defaultTableOptions: Partial<TableOptions> = {
|
||||
cellHeight: TableCellHeight.Sm,
|
||||
frameIndex: 0,
|
||||
showHeader: true,
|
||||
showTypeIcons: false,
|
||||
sortBy: [],
|
||||
};
|
||||
|
||||
/**
|
||||
* Field options for each field within a table (e.g 10, "The String", 64.20, etc.)
|
||||
* Generally defines alignment, filtering capabilties, display options, etc.
|
||||
|
|
|
|||
|
|
@ -110,6 +110,29 @@ TableFooterOptions: {
|
|||
reducers?: [...string]
|
||||
} @cuetsy(kind="interface")
|
||||
|
||||
TableOptions: {
|
||||
// Represents the index of the selected frame
|
||||
frameIndex: number | *0
|
||||
// Controls whether the panel should show the header
|
||||
showHeader: bool | *true
|
||||
// Controls whether the header should show icons for the column types
|
||||
showTypeIcons?: bool | *false
|
||||
// Used to control row sorting
|
||||
sortBy?: [...TableSortByFieldState]
|
||||
// Enable pagination on the table
|
||||
enablePagination?: bool
|
||||
// Controls the height of the rows
|
||||
cellHeight?: TableCellHeight & (*"sm" | _)
|
||||
// limits the maximum height of a row, if text wrapping or dynamic height is enabled
|
||||
maxRowHeight?: number
|
||||
// Defines the number of columns to freeze on the left side of the table
|
||||
frozenColumns?: {
|
||||
left?: number | *0
|
||||
}
|
||||
// If true, disables all keyboard events in the table. this is used when previewing a table (i.e. suggestions)
|
||||
disableKeyboardEvents?: bool
|
||||
} @cuetsy(kind="interface")
|
||||
|
||||
// Field options for each field within a table (e.g 10, "The String", 64.20, etc.)
|
||||
// Generally defines alignment, filtering capabilties, display options, etc.
|
||||
TableFieldOptions: {
|
||||
|
|
@ -127,10 +150,10 @@ TableFieldOptions: {
|
|||
wrapText?: bool
|
||||
// Enables text wrapping for column headers
|
||||
wrapHeaderText?: bool
|
||||
// options for the footer for this field
|
||||
footer?: TableFooterOptions
|
||||
// Selecting or hovering this field will show a tooltip containing the content within the target field
|
||||
tooltip?: TableCellTooltipOptions
|
||||
// The name of the field which contains styling overrides for this cell
|
||||
styleField?: string
|
||||
// options for the footer for this field
|
||||
footer?: TableFooterOptions
|
||||
} & HideableFieldConfig @cuetsy(kind="interface")
|
||||
|
|
|
|||
|
|
@ -14,53 +14,6 @@ import * as ui from '@grafana/schema';
|
|||
|
||||
export const pluginVersion = "12.4.0-pre";
|
||||
|
||||
export interface Options {
|
||||
/**
|
||||
* Controls the height of the rows
|
||||
*/
|
||||
cellHeight?: ui.TableCellHeight;
|
||||
/**
|
||||
* If true, disables all keyboard events in the table. this is used when previewing a table (i.e. suggestions)
|
||||
*/
|
||||
disableKeyboardEvents?: boolean;
|
||||
/**
|
||||
* Enable pagination on the table
|
||||
*/
|
||||
enablePagination?: boolean;
|
||||
/**
|
||||
* Represents the index of the selected frame
|
||||
*/
|
||||
frameIndex: number;
|
||||
/**
|
||||
* Defines the number of columns to freeze on the left side of the table
|
||||
*/
|
||||
frozenColumns?: {
|
||||
left?: number;
|
||||
};
|
||||
/**
|
||||
* limits the maximum height of a row, if text wrapping or dynamic height is enabled
|
||||
*/
|
||||
maxRowHeight?: number;
|
||||
/**
|
||||
* Controls whether the panel should show the header
|
||||
*/
|
||||
showHeader: boolean;
|
||||
/**
|
||||
* Controls whether the header should show icons for the column types
|
||||
*/
|
||||
showTypeIcons?: boolean;
|
||||
/**
|
||||
* Used to control row sorting
|
||||
*/
|
||||
sortBy?: Array<ui.TableSortByFieldState>;
|
||||
}
|
||||
|
||||
export const defaultOptions: Partial<Options> = {
|
||||
cellHeight: ui.TableCellHeight.Sm,
|
||||
frameIndex: 0,
|
||||
showHeader: true,
|
||||
showTypeIcons: false,
|
||||
sortBy: [],
|
||||
};
|
||||
export interface Options extends ui.TableOptions {}
|
||||
|
||||
export interface FieldConfig extends ui.TableFieldOptions {}
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ import (
|
|||
grafanaapiserver "github.com/grafana/grafana/pkg/services/apiserver"
|
||||
"github.com/grafana/grafana/pkg/services/auth"
|
||||
"github.com/grafana/grafana/pkg/services/authn/authnimpl"
|
||||
"github.com/grafana/grafana/pkg/services/authz"
|
||||
"github.com/grafana/grafana/pkg/services/cleanup"
|
||||
"github.com/grafana/grafana/pkg/services/cloudmigration"
|
||||
"github.com/grafana/grafana/pkg/services/dashboards/service"
|
||||
|
|
@ -74,6 +75,7 @@ func ProvideBackgroundServiceRegistry(
|
|||
secretsGarbageCollectionWorker *secretsgarbagecollectionworker.Worker,
|
||||
fixedRolesLoader *accesscontrol.FixedRolesLoader,
|
||||
installSync installsync.Syncer,
|
||||
zanzanaService *authz.EmbeddedZanzanaService,
|
||||
// Need to make sure these are initialized, is there a better place to put them?
|
||||
_ dashboardsnapshots.Service,
|
||||
_ serviceaccounts.Service,
|
||||
|
|
@ -122,6 +124,7 @@ func ProvideBackgroundServiceRegistry(
|
|||
secretsGarbageCollectionWorker,
|
||||
fixedRolesLoader,
|
||||
installSync,
|
||||
zanzanaService,
|
||||
)
|
||||
}
|
||||
|
||||
|
|
|
|||
26
pkg/server/wire_gen.go
generated
26
pkg/server/wire_gen.go
generated
|
|
@ -490,7 +490,11 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
zanzanaClient, err := authz.ProvideZanzanaClient(cfg, sqlStore, tracingService, featureToggles, registerer)
|
||||
server, err := authz.ProvideEmbeddedZanzanaServer(cfg, sqlStore, tracingService, featureToggles, registerer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
zanzanaClient, err := authz.ProvideZanzanaClient(cfg, sqlStore, server, featureToggles, registerer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -870,6 +874,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
embeddedZanzanaService := authz.ProvideEmbeddedZanzanaService(server)
|
||||
healthService, err := grpcserver.ProvideHealthService(cfg, grpcserverProvider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -961,13 +966,13 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
|
|||
}
|
||||
ossUserProtectionImpl := authinfoimpl.ProvideOSSUserProtectionService()
|
||||
registration := authnimpl.ProvideRegistration(cfg, authnService, orgService, userAuthTokenService, acimplService, permissionRegistry, apikeyService, userService, authService, ossUserProtectionImpl, loginattemptimplService, quotaService, authinfoimplService, renderingService, featureToggles, oauthtokenService, socialService, remoteCache, ldapImpl, ossImpl, tracingService, tempuserService, notificationService)
|
||||
backgroundServiceRegistry := backgroundsvcs.ProvideBackgroundServiceRegistry(httpServer, alertNG, cleanUpService, grafanaLive, gateway, notificationService, pluginstoreService, renderingService, userAuthTokenService, tracingService, provisioningServiceImpl, usageStats, statscollectorService, grafanaService, pluginsService, internalMetricsService, secretsService, remoteCache, storageService, entityEventsService, serviceAccountsService, grpcserverProvider, secretMigrationProviderImpl, loginattemptimplService, supportbundlesimplService, metricService, keyRetriever, angulardetectorsproviderDynamic, apiserverService, anonDeviceService, ssosettingsimplService, pluginexternalService, plugininstallerService, zanzanaReconciler, appregistryService, dashboardUpdater, dashboardServiceImpl, worker, fixedRolesLoader, syncer, serviceImpl, serviceAccountsProxy, healthService, reflectionService, apiService, apiregistryService, idimplService, teamAPI, ssosettingsimplService, cloudmigrationService, registration)
|
||||
backgroundServiceRegistry := backgroundsvcs.ProvideBackgroundServiceRegistry(httpServer, alertNG, cleanUpService, grafanaLive, gateway, notificationService, pluginstoreService, renderingService, userAuthTokenService, tracingService, provisioningServiceImpl, usageStats, statscollectorService, grafanaService, pluginsService, internalMetricsService, secretsService, remoteCache, storageService, entityEventsService, serviceAccountsService, grpcserverProvider, secretMigrationProviderImpl, loginattemptimplService, supportbundlesimplService, metricService, keyRetriever, angulardetectorsproviderDynamic, apiserverService, anonDeviceService, ssosettingsimplService, pluginexternalService, plugininstallerService, zanzanaReconciler, appregistryService, dashboardUpdater, dashboardServiceImpl, worker, fixedRolesLoader, syncer, embeddedZanzanaService, serviceImpl, serviceAccountsProxy, healthService, reflectionService, apiService, apiregistryService, idimplService, teamAPI, ssosettingsimplService, cloudmigrationService, registration)
|
||||
usageStatsProvidersRegistry := usagestatssvcs.ProvideUsageStatsProvidersRegistry(acimplService, userService)
|
||||
server, err := New(opts, cfg, httpServer, acimplService, provisioningServiceImpl, backgroundServiceRegistry, usageStatsProvidersRegistry, statscollectorService, tracingService, featureToggles, registerer)
|
||||
serverServer, err := New(opts, cfg, httpServer, acimplService, provisioningServiceImpl, backgroundServiceRegistry, usageStatsProvidersRegistry, statscollectorService, tracingService, featureToggles, registerer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return server, nil
|
||||
return serverServer, nil
|
||||
}
|
||||
|
||||
var (
|
||||
|
|
@ -1167,7 +1172,11 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
zanzanaClient, err := authz.ProvideZanzanaClient(cfg, sqlStore, tracingService, featureToggles, registerer)
|
||||
server, err := authz.ProvideEmbeddedZanzanaServer(cfg, sqlStore, tracingService, featureToggles, registerer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
zanzanaClient, err := authz.ProvideZanzanaClient(cfg, sqlStore, server, featureToggles, registerer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -1549,6 +1558,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
embeddedZanzanaService := authz.ProvideEmbeddedZanzanaService(server)
|
||||
healthService, err := grpcserver.ProvideHealthService(cfg, grpcserverProvider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -1640,13 +1650,13 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
|
|||
}
|
||||
ossUserProtectionImpl := authinfoimpl.ProvideOSSUserProtectionService()
|
||||
registration := authnimpl.ProvideRegistration(cfg, authnService, orgService, userAuthTokenService, acimplService, permissionRegistry, apikeyService, userService, authService, ossUserProtectionImpl, loginattemptimplService, quotaService, authinfoimplService, renderingService, featureToggles, oauthtokentestService, socialService, remoteCache, ldapImpl, ossImpl, tracingService, tempuserService, notificationServiceMock)
|
||||
backgroundServiceRegistry := backgroundsvcs.ProvideBackgroundServiceRegistry(httpServer, alertNG, cleanUpService, grafanaLive, gateway, notificationService, pluginstoreService, renderingService, userAuthTokenService, tracingService, provisioningServiceImpl, usageStats, statscollectorService, grafanaService, pluginsService, internalMetricsService, secretsService, remoteCache, storageService, entityEventsService, serviceAccountsService, grpcserverProvider, secretMigrationProviderImpl, loginattemptimplService, supportbundlesimplService, metricService, keyRetriever, angulardetectorsproviderDynamic, apiserverService, anonDeviceService, ssosettingsimplService, pluginexternalService, plugininstallerService, zanzanaReconciler, appregistryService, dashboardUpdater, dashboardServiceImpl, worker, fixedRolesLoader, syncer, serviceImpl, serviceAccountsProxy, healthService, reflectionService, apiService, apiregistryService, idimplService, teamAPI, ssosettingsimplService, cloudmigrationService, registration)
|
||||
backgroundServiceRegistry := backgroundsvcs.ProvideBackgroundServiceRegistry(httpServer, alertNG, cleanUpService, grafanaLive, gateway, notificationService, pluginstoreService, renderingService, userAuthTokenService, tracingService, provisioningServiceImpl, usageStats, statscollectorService, grafanaService, pluginsService, internalMetricsService, secretsService, remoteCache, storageService, entityEventsService, serviceAccountsService, grpcserverProvider, secretMigrationProviderImpl, loginattemptimplService, supportbundlesimplService, metricService, keyRetriever, angulardetectorsproviderDynamic, apiserverService, anonDeviceService, ssosettingsimplService, pluginexternalService, plugininstallerService, zanzanaReconciler, appregistryService, dashboardUpdater, dashboardServiceImpl, worker, fixedRolesLoader, syncer, embeddedZanzanaService, serviceImpl, serviceAccountsProxy, healthService, reflectionService, apiService, apiregistryService, idimplService, teamAPI, ssosettingsimplService, cloudmigrationService, registration)
|
||||
usageStatsProvidersRegistry := usagestatssvcs.ProvideUsageStatsProvidersRegistry(acimplService, userService)
|
||||
server, err := New(opts, cfg, httpServer, acimplService, provisioningServiceImpl, backgroundServiceRegistry, usageStatsProvidersRegistry, statscollectorService, tracingService, featureToggles, registerer)
|
||||
serverServer, err := New(opts, cfg, httpServer, acimplService, provisioningServiceImpl, backgroundServiceRegistry, usageStatsProvidersRegistry, statscollectorService, tracingService, featureToggles, registerer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
testEnv, err := ProvideTestEnv(testingT, server, sqlStore, cfg, notificationServiceMock, grpcserverProvider, inMemory, httpclientProvider, oauthtokentestService, featureToggles, resourceClient, idimplService, factory, githubFactory, decryptService)
|
||||
testEnv, err := ProvideTestEnv(testingT, serverServer, sqlStore, cfg, notificationServiceMock, grpcserverProvider, inMemory, httpclientProvider, oauthtokentestService, featureToggles, resourceClient, idimplService, factory, githubFactory, decryptService)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,4 +7,6 @@ import (
|
|||
var WireSet = wire.NewSet(
|
||||
ProvideAuthZClient,
|
||||
ProvideZanzanaClient,
|
||||
ProvideEmbeddedZanzanaServer,
|
||||
ProvideEmbeddedZanzanaService,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -29,7 +29,6 @@ import (
|
|||
"github.com/grafana/grafana/pkg/services/authz/zanzana"
|
||||
zClient "github.com/grafana/grafana/pkg/services/authz/zanzana/client"
|
||||
zServer "github.com/grafana/grafana/pkg/services/authz/zanzana/server"
|
||||
zStore "github.com/grafana/grafana/pkg/services/authz/zanzana/store"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/services/grpcserver"
|
||||
"github.com/grafana/grafana/pkg/services/grpcserver/interceptors"
|
||||
|
|
@ -38,7 +37,7 @@ import (
|
|||
|
||||
// ProvideZanzanaClient used to register ZanzanaClient.
|
||||
// It will also start an embedded ZanzanaSever if mode is set to "embedded".
|
||||
func ProvideZanzanaClient(cfg *setting.Cfg, db db.DB, tracer tracing.Tracer, features featuremgmt.FeatureToggles, reg prometheus.Registerer) (zanzana.Client, error) {
|
||||
func ProvideZanzanaClient(cfg *setting.Cfg, db db.DB, zanzanaServer zanzana.Server, features featuremgmt.FeatureToggles, reg prometheus.Registerer) (zanzana.Client, error) {
|
||||
//nolint:staticcheck // not yet migrated to OpenFeature
|
||||
if !features.IsEnabledGlobally(featuremgmt.FlagZanzana) {
|
||||
return zClient.NewNoopClient(), nil
|
||||
|
|
@ -56,22 +55,6 @@ func ProvideZanzanaClient(cfg *setting.Cfg, db db.DB, tracer tracing.Tracer, fea
|
|||
return NewRemoteZanzanaClient(zanzanaConfig, reg)
|
||||
|
||||
case setting.ZanzanaModeEmbedded:
|
||||
logger := log.New("zanzana.server")
|
||||
store, err := zStore.NewEmbeddedStore(cfg, db, logger)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start zanzana: %w", err)
|
||||
}
|
||||
|
||||
openfga, err := zServer.NewOpenFGAServer(cfg.ZanzanaServer, store)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start zanzana: %w", err)
|
||||
}
|
||||
|
||||
srv, err := zServer.NewServer(cfg.ZanzanaServer, openfga, logger, tracer, reg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start zanzana: %w", err)
|
||||
}
|
||||
|
||||
channel := &inprocgrpc.Channel{}
|
||||
// Put * as a namespace so we can properly authorize request with in-proc mode
|
||||
channel.WithServerUnaryInterceptor(grpcAuth.UnaryServerInterceptor(func(ctx context.Context) (context.Context, error) {
|
||||
|
|
@ -86,9 +69,8 @@ func ProvideZanzanaClient(cfg *setting.Cfg, db db.DB, tracer tracing.Tracer, fea
|
|||
return ctx, nil
|
||||
}))
|
||||
|
||||
openfgav1.RegisterOpenFGAServiceServer(channel, openfga)
|
||||
authzv1.RegisterAuthzServiceServer(channel, srv)
|
||||
authzextv1.RegisterAuthzExtentionServiceServer(channel, srv)
|
||||
authzv1.RegisterAuthzServiceServer(channel, zanzanaServer)
|
||||
authzextv1.RegisterAuthzExtentionServiceServer(channel, zanzanaServer)
|
||||
|
||||
client, err := zClient.New(channel, reg)
|
||||
if err != nil {
|
||||
|
|
@ -101,6 +83,51 @@ func ProvideZanzanaClient(cfg *setting.Cfg, db db.DB, tracer tracing.Tracer, fea
|
|||
}
|
||||
}
|
||||
|
||||
// ProvideEmbeddedZanzanaServer creates and registers embedded ZanzanaServer.
|
||||
func ProvideEmbeddedZanzanaServer(cfg *setting.Cfg, db db.DB, tracer tracing.Tracer, features featuremgmt.FeatureToggles, reg prometheus.Registerer) (zanzana.Server, error) {
|
||||
//nolint:staticcheck // not yet migrated to OpenFeature
|
||||
if !features.IsEnabledGlobally(featuremgmt.FlagZanzana) {
|
||||
return zServer.NewNoopServer(), nil
|
||||
}
|
||||
|
||||
logger := log.New("zanzana.server")
|
||||
|
||||
srv, err := zServer.NewEmbeddedZanzanaServer(cfg, db, logger, tracer, reg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start zanzana: %w", err)
|
||||
}
|
||||
|
||||
return srv, nil
|
||||
}
|
||||
|
||||
// ProvideEmbeddedZanzanaService creates a background service wrapper for the embedded zanzana server
|
||||
// to ensure proper cleanup when Grafana shuts down.
|
||||
func ProvideEmbeddedZanzanaService(server zanzana.Server) *EmbeddedZanzanaService {
|
||||
return &EmbeddedZanzanaService{
|
||||
server: server,
|
||||
}
|
||||
}
|
||||
|
||||
// EmbeddedZanzanaService wraps the embedded zanzana server as a background service
|
||||
// to ensure Close() is called during shutdown.
|
||||
type EmbeddedZanzanaService struct {
|
||||
server zanzana.Server
|
||||
}
|
||||
|
||||
func (s *EmbeddedZanzanaService) Run(ctx context.Context) error {
|
||||
// The zanzana server doesn't have a blocking Run method,
|
||||
// so we just wait for shutdown
|
||||
<-ctx.Done()
|
||||
if s.server != nil {
|
||||
s.server.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *EmbeddedZanzanaService) IsDisabled() bool {
|
||||
return s.server == nil
|
||||
}
|
||||
|
||||
// ProvideStandaloneZanzanaClient provides a standalone Zanzana client, without registering the Zanzana service.
|
||||
// Client connects to a remote Zanzana server specified in the configuration.
|
||||
func ProvideStandaloneZanzanaClient(cfg *setting.Cfg, features featuremgmt.FeatureToggles, reg prometheus.Registerer) (zanzana.Client, error) {
|
||||
|
|
@ -185,11 +212,24 @@ var _ ZanzanaService = (*Zanzana)(nil)
|
|||
|
||||
// ProvideZanzanaService is used to register zanzana as a module so we can run it seperatly from grafana.
|
||||
func ProvideZanzanaService(cfg *setting.Cfg, features featuremgmt.FeatureToggles, reg prometheus.Registerer) (*Zanzana, error) {
|
||||
tracingCfg, err := tracing.ProvideTracingConfig(cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to provide tracing config: %w", err)
|
||||
}
|
||||
|
||||
tracingCfg.ServiceName = "zanzana"
|
||||
|
||||
tracer, err := tracing.ProvideService(tracingCfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to provide tracing service: %w", err)
|
||||
}
|
||||
|
||||
s := &Zanzana{
|
||||
cfg: cfg,
|
||||
features: features,
|
||||
logger: log.New("zanzana.server"),
|
||||
reg: reg,
|
||||
tracer: tracer,
|
||||
}
|
||||
|
||||
s.BasicService = services.NewBasicService(s.start, s.running, s.stopping).WithName("zanzana")
|
||||
|
|
@ -200,48 +240,28 @@ func ProvideZanzanaService(cfg *setting.Cfg, features featuremgmt.FeatureToggles
|
|||
type Zanzana struct {
|
||||
*services.BasicService
|
||||
|
||||
cfg *setting.Cfg
|
||||
|
||||
logger log.Logger
|
||||
handle grpcserver.Provider
|
||||
features featuremgmt.FeatureToggles
|
||||
reg prometheus.Registerer
|
||||
cfg *setting.Cfg
|
||||
zanzanaServer zanzana.Server
|
||||
logger log.Logger
|
||||
tracer tracing.Tracer
|
||||
handle grpcserver.Provider
|
||||
features featuremgmt.FeatureToggles
|
||||
reg prometheus.Registerer
|
||||
}
|
||||
|
||||
func (z *Zanzana) start(ctx context.Context) error {
|
||||
tracingCfg, err := tracing.ProvideTracingConfig(z.cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tracingCfg.ServiceName = "zanzana"
|
||||
|
||||
tracer, err := tracing.ProvideService(tracingCfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store, err := zStore.NewStore(z.cfg, z.logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initilize zanana store: %w", err)
|
||||
}
|
||||
|
||||
openfgaServer, err := zServer.NewOpenFGAServer(z.cfg.ZanzanaServer, store)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start zanzana: %w", err)
|
||||
}
|
||||
|
||||
zanzanaServer, err := zServer.NewServer(z.cfg.ZanzanaServer, openfgaServer, z.logger, tracer, z.reg)
|
||||
zanzanaServer, err := zServer.NewZanzanaServer(z.cfg, z.logger, z.tracer, z.reg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start zanzana: %w", err)
|
||||
}
|
||||
z.zanzanaServer = zanzanaServer
|
||||
|
||||
var authenticatorInterceptor interceptors.Authenticator
|
||||
if z.cfg.ZanzanaServer.AllowInsecure && z.cfg.Env == setting.Dev {
|
||||
z.logger.Info("Allowing insecure connections to OpenFGA HTTP server")
|
||||
z.logger.Info("Allowing insecure connections to zanzana server")
|
||||
authenticatorInterceptor = noopAuthenticator{}
|
||||
} else {
|
||||
z.logger.Info("Requiring secure connections to OpenFGA HTTP server")
|
||||
z.logger.Info("Requiring secure connections to zanzana server")
|
||||
authenticator := authnlib.NewAccessTokenAuthenticator(
|
||||
authnlib.NewAccessTokenVerifier(
|
||||
authnlib.VerifierConfig{AllowedAudiences: []string{AuthzServiceAudience}},
|
||||
|
|
@ -253,7 +273,7 @@ func (z *Zanzana) start(ctx context.Context) error {
|
|||
authenticatorInterceptor = interceptors.AuthenticatorFunc(
|
||||
grpcutils.NewAuthenticatorInterceptor(
|
||||
authenticator,
|
||||
tracer,
|
||||
z.tracer,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
|
@ -262,7 +282,7 @@ func (z *Zanzana) start(ctx context.Context) error {
|
|||
z.cfg,
|
||||
z.features,
|
||||
authenticatorInterceptor,
|
||||
tracer,
|
||||
z.tracer,
|
||||
prometheus.DefaultRegisterer,
|
||||
)
|
||||
if err != nil {
|
||||
|
|
@ -270,7 +290,6 @@ func (z *Zanzana) start(ctx context.Context) error {
|
|||
}
|
||||
|
||||
grpcServer := z.handle.GetServer()
|
||||
openfgav1.RegisterOpenFGAServiceServer(grpcServer, openfgaServer)
|
||||
authzv1.RegisterAuthzServiceServer(grpcServer, zanzanaServer)
|
||||
authzextv1.RegisterAuthzExtentionServiceServer(grpcServer, zanzanaServer)
|
||||
|
||||
|
|
@ -278,6 +297,11 @@ func (z *Zanzana) start(ctx context.Context) error {
|
|||
healthServer := zServer.NewHealthServer(zanzanaServer)
|
||||
healthv1pb.RegisterHealthServer(grpcServer, healthServer)
|
||||
|
||||
if z.cfg.ZanzanaServer.OpenFGAHttpAddr != "" {
|
||||
// Register OpenFGA service server to pass to the HTTP server
|
||||
openfgav1.RegisterOpenFGAServiceServer(grpcServer, zanzanaServer.GetOpenFGAServer())
|
||||
}
|
||||
|
||||
if _, err := grpcserver.ProvideReflectionService(z.cfg, z.handle); err != nil {
|
||||
return fmt.Errorf("failed to register reflection for zanzana: %w", err)
|
||||
}
|
||||
|
|
@ -286,19 +310,10 @@ func (z *Zanzana) start(ctx context.Context) error {
|
|||
}
|
||||
|
||||
func (z *Zanzana) running(ctx context.Context) error {
|
||||
if z.cfg.Env == setting.Dev && z.cfg.ZanzanaServer.OpenFGAHttpAddr != "" {
|
||||
if z.cfg.ZanzanaServer.OpenFGAHttpAddr != "" {
|
||||
go func() {
|
||||
srv, err := zServer.NewOpenFGAHttpServer(z.cfg.ZanzanaServer, z.handle)
|
||||
if err != nil {
|
||||
z.logger.Error("failed to create OpenFGA HTTP server", "error", err)
|
||||
} else {
|
||||
z.logger.Info("Starting OpenFGA HTTP server")
|
||||
if z.cfg.ZanzanaServer.AllowInsecure {
|
||||
z.logger.Warn("Allowing unauthenticated connections!")
|
||||
}
|
||||
if err := srv.ListenAndServe(); err != nil {
|
||||
z.logger.Error("failed to start OpenFGA HTTP server", "error", err)
|
||||
}
|
||||
if err := z.runHTTPServer(); err != nil {
|
||||
z.logger.Error("failed to run OpenFGA HTTP server", "error", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
|
@ -311,6 +326,32 @@ func (z *Zanzana) stopping(err error) error {
|
|||
if err != nil && !errors.Is(err, context.Canceled) {
|
||||
z.logger.Error("Stopping zanzana due to unexpected error", "err", err)
|
||||
}
|
||||
z.zanzanaServer.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (z *Zanzana) runHTTPServer() error {
|
||||
if z.cfg.Env != setting.Dev && z.cfg.ZanzanaServer.AllowInsecure {
|
||||
return fmt.Errorf("allow_insecure is only supported in dev mode")
|
||||
}
|
||||
|
||||
z.logger.Info("Initializing OpenFGA HTTP server", "address", z.cfg.ZanzanaServer.OpenFGAHttpAddr)
|
||||
|
||||
httpSrv, err := zServer.NewOpenFGAHttpServer(z.cfg.ZanzanaServer, z.handle)
|
||||
if err != nil {
|
||||
z.logger.Error("failed to create OpenFGA HTTP server", "error", err)
|
||||
return err
|
||||
} else {
|
||||
z.logger.Info("Starting OpenFGA HTTP server", "address", z.cfg.ZanzanaServer.OpenFGAHttpAddr)
|
||||
if z.cfg.ZanzanaServer.AllowInsecure {
|
||||
z.logger.Warn("Allowing unauthenticated connections!")
|
||||
}
|
||||
if err := httpSrv.ListenAndServe(); err != nil {
|
||||
z.logger.Error("failed to start OpenFGA HTTP server", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1 +1,13 @@
|
|||
package zanzana
|
||||
|
||||
import (
|
||||
authzv1 "github.com/grafana/authlib/authz/proto/v1"
|
||||
|
||||
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
)
|
||||
|
||||
type Server interface {
|
||||
authzv1.AuthzServiceServer
|
||||
authzextv1.AuthzExtentionServiceServer
|
||||
Close()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,20 +3,19 @@ package server
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
claims "github.com/grafana/authlib/types"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
claims "github.com/grafana/authlib/types"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
|
||||
func authorize(ctx context.Context, namespace string, ss setting.ZanzanaServerSettings) error {
|
||||
func authorize(ctx context.Context, namespace string, cfg setting.ZanzanaServerSettings) error {
|
||||
logger := log.New("zanzana.server.auth")
|
||||
if ss.AllowInsecure {
|
||||
if cfg.AllowInsecure {
|
||||
logger.Debug("AllowInsecure=true; skipping authorization check")
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
23
pkg/services/authz/zanzana/server/noop.go
Normal file
23
pkg/services/authz/zanzana/server/noop.go
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
authzv1 "github.com/grafana/authlib/authz/proto/v1"
|
||||
|
||||
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana"
|
||||
)
|
||||
|
||||
var _ zanzana.Server = (*NoopServer)(nil)
|
||||
|
||||
func NewNoopServer() *NoopServer {
|
||||
return &NoopServer{}
|
||||
}
|
||||
|
||||
type NoopServer struct {
|
||||
authzv1.UnimplementedAuthzServiceServer
|
||||
authzextv1.UnimplementedAuthzExtentionServiceServer
|
||||
}
|
||||
|
||||
func (s *NoopServer) Close() {
|
||||
// noop
|
||||
}
|
||||
|
|
@ -192,12 +192,12 @@ func withListOptions(cfg setting.ZanzanaServerSettings) []server.OpenFGAServiceV
|
|||
return opts
|
||||
}
|
||||
|
||||
func NewOpenFGAHttpServer(cfg setting.ZanzanaServerSettings, srv grpcserver.Provider) (*http.Server, error) {
|
||||
func NewOpenFGAHttpServer(cfg setting.ZanzanaServerSettings, grpcSrv grpcserver.Provider) (*http.Server, error) {
|
||||
dialOpts := []grpc.DialOption{
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
}
|
||||
|
||||
addr := srv.GetAddress()
|
||||
addr := grpcSrv.GetAddress()
|
||||
// Wait until GRPC server is initialized
|
||||
ticker := time.NewTicker(100 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
|
@ -205,7 +205,7 @@ func NewOpenFGAHttpServer(cfg setting.ZanzanaServerSettings, srv grpcserver.Prov
|
|||
retries := 0
|
||||
for addr == "" && retries < maxRetries {
|
||||
<-ticker.C
|
||||
addr = srv.GetAddress()
|
||||
addr = grpcSrv.GetAddress()
|
||||
retries++
|
||||
}
|
||||
if addr == "" {
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ package server
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
|
@ -9,16 +10,19 @@ import (
|
|||
"github.com/fullstorydev/grpchan/inprocgrpc"
|
||||
authzv1 "github.com/grafana/authlib/authz/proto/v1"
|
||||
openfgav1 "github.com/openfga/api/proto/openfga/v1"
|
||||
"github.com/openfga/openfga/pkg/storage"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"google.golang.org/protobuf/types/known/wrapperspb"
|
||||
|
||||
dashboardV2alpha1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2alpha1"
|
||||
dashboardV2beta1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2beta1"
|
||||
"github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/infra/localcache"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
zStore "github.com/grafana/grafana/pkg/services/authz/zanzana/store"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
|
||||
|
|
@ -36,8 +40,9 @@ type Server struct {
|
|||
authzv1.UnimplementedAuthzServiceServer
|
||||
authzextv1.UnimplementedAuthzExtentionServiceServer
|
||||
|
||||
openfga OpenFGAServer
|
||||
openfgaClient openfgav1.OpenFGAServiceClient
|
||||
openFGAServer OpenFGAServer
|
||||
openFGAClient openfgav1.OpenFGAServiceClient
|
||||
store storage.OpenFGADatastore
|
||||
|
||||
cfg setting.ZanzanaServerSettings
|
||||
stores map[string]storeInfo
|
||||
|
|
@ -54,18 +59,49 @@ type storeInfo struct {
|
|||
ModelID string
|
||||
}
|
||||
|
||||
func NewServer(cfg setting.ZanzanaServerSettings, openfga OpenFGAServer, logger log.Logger, tracer tracing.Tracer, reg prometheus.Registerer) (*Server, error) {
|
||||
func NewEmbeddedZanzanaServer(cfg *setting.Cfg, db db.DB, logger log.Logger, tracer tracing.Tracer, reg prometheus.Registerer) (*Server, error) {
|
||||
store, err := zStore.NewEmbeddedStore(cfg, db, logger)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start zanzana: %w", err)
|
||||
}
|
||||
|
||||
openfga, err := NewOpenFGAServer(cfg.ZanzanaServer, store)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start zanzana: %w", err)
|
||||
}
|
||||
|
||||
return newServer(cfg, openfga, store, logger, tracer, reg)
|
||||
}
|
||||
|
||||
func NewZanzanaServer(cfg *setting.Cfg, logger log.Logger, tracer tracing.Tracer, reg prometheus.Registerer) (*Server, error) {
|
||||
store, err := zStore.NewStore(cfg, logger)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initilize zanana store: %w", err)
|
||||
}
|
||||
|
||||
openfgaServer, err := NewOpenFGAServer(cfg.ZanzanaServer, store)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start zanzana: %w", err)
|
||||
}
|
||||
|
||||
return newServer(cfg, openfgaServer, store, logger, tracer, reg)
|
||||
}
|
||||
|
||||
func newServer(cfg *setting.Cfg, openfga OpenFGAServer, store storage.OpenFGADatastore, logger log.Logger, tracer tracing.Tracer, reg prometheus.Registerer) (*Server, error) {
|
||||
channel := &inprocgrpc.Channel{}
|
||||
openfgav1.RegisterOpenFGAServiceServer(channel, openfga)
|
||||
openFGAClient := openfgav1.NewOpenFGAServiceClient(channel)
|
||||
|
||||
zanzanaCfg := cfg.ZanzanaServer
|
||||
|
||||
s := &Server{
|
||||
openfga: openfga,
|
||||
openfgaClient: openFGAClient,
|
||||
openFGAServer: openfga,
|
||||
openFGAClient: openFGAClient,
|
||||
store: store,
|
||||
storesMU: &sync.Mutex{},
|
||||
stores: make(map[string]storeInfo),
|
||||
cfg: cfg,
|
||||
cache: localcache.New(cfg.CacheSettings.CheckQueryCacheTTL, cacheCleanInterval),
|
||||
cfg: zanzanaCfg,
|
||||
cache: localcache.New(zanzanaCfg.CacheSettings.CheckQueryCacheTTL, cacheCleanInterval),
|
||||
logger: logger,
|
||||
tracer: tracer,
|
||||
metrics: newZanzanaServerMetrics(reg),
|
||||
|
|
@ -74,13 +110,21 @@ func NewServer(cfg setting.ZanzanaServerSettings, openfga OpenFGAServer, logger
|
|||
return s, nil
|
||||
}
|
||||
|
||||
func (s *Server) GetOpenFGAServer() openfgav1.OpenFGAServiceServer {
|
||||
return s.openFGAServer
|
||||
}
|
||||
|
||||
func (s *Server) IsHealthy(ctx context.Context) (bool, error) {
|
||||
_, err := s.openfga.ListStores(ctx, &openfgav1.ListStoresRequest{
|
||||
_, err := s.openFGAClient.ListStores(ctx, &openfgav1.ListStoresRequest{
|
||||
PageSize: wrapperspb.Int32(1),
|
||||
})
|
||||
return err == nil, nil
|
||||
}
|
||||
|
||||
func (s *Server) Close() {
|
||||
s.store.Close()
|
||||
}
|
||||
|
||||
func (s *Server) getContextuals(subject string) (*openfgav1.ContextualTupleKeys, error) {
|
||||
contextuals := make([]*openfgav1.TupleKey, 0)
|
||||
|
||||
|
|
|
|||
|
|
@ -16,7 +16,6 @@ import (
|
|||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/store"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
|
|
@ -339,13 +338,7 @@ func setupBenchmarkServer(b *testing.B) (*Server, *benchmarkData) {
|
|||
cfg := setting.NewCfg()
|
||||
testStore := sqlstore.NewTestStore(b, sqlstore.WithCfg(cfg))
|
||||
|
||||
openFGAStore, err := store.NewEmbeddedStore(cfg, testStore, log.NewNopLogger())
|
||||
require.NoError(b, err)
|
||||
|
||||
openfga, err := NewOpenFGAServer(cfg.ZanzanaServer, openFGAStore)
|
||||
require.NoError(b, err)
|
||||
|
||||
srv, err := NewServer(cfg.ZanzanaServer, openfga, log.NewNopLogger(), tracing.NewNoopTracerService(), prometheus.NewRegistry())
|
||||
srv, err := NewEmbeddedZanzanaServer(cfg, testStore, log.NewNopLogger(), tracing.NewNoopTracerService(), prometheus.NewRegistry())
|
||||
require.NoError(b, err)
|
||||
|
||||
// Generate test data
|
||||
|
|
@ -403,7 +396,7 @@ func setupBenchmarkServer(b *testing.B) (*Server, *benchmarkData) {
|
|||
}
|
||||
batch := allTuples[i:end]
|
||||
|
||||
_, err = srv.openfga.Write(ctx, &openfgav1.WriteRequest{
|
||||
_, err = srv.openFGAClient.Write(ctx, &openfgav1.WriteRequest{
|
||||
StoreId: storeInf.ID,
|
||||
AuthorizationModelId: storeInf.ModelID,
|
||||
Writes: &openfgav1.WriteRequestWrites{
|
||||
|
|
|
|||
|
|
@ -194,7 +194,7 @@ func (s *Server) checkGeneric(ctx context.Context, subject, relation string, res
|
|||
}
|
||||
|
||||
func (s *Server) openfgaCheck(ctx context.Context, store *storeInfo, subject, relation, object string, contextuals *openfgav1.ContextualTupleKeys, resourceCtx *structpb.Struct) (*openfgav1.CheckResponse, error) {
|
||||
res, err := s.openfga.Check(ctx, &openfgav1.CheckRequest{
|
||||
res, err := s.openFGAClient.Check(ctx, &openfgav1.CheckRequest{
|
||||
StoreId: store.ID,
|
||||
AuthorizationModelId: store.ModelID,
|
||||
TupleKey: &openfgav1.CheckRequestTupleKey{
|
||||
|
|
|
|||
|
|
@ -8,9 +8,15 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func testCheck(t *testing.T, server *Server) {
|
||||
func TestIntegrationServerCheck(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
server := setupOpenFGAServer(t)
|
||||
setup(t, server)
|
||||
|
||||
newReq := func(subject, verb, group, resource, subresource, folder, name string) *authzv1.CheckRequest {
|
||||
return &authzv1.CheckRequest{
|
||||
Namespace: namespace,
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ import (
|
|||
openfgav1 "github.com/openfga/api/proto/openfga/v1"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
)
|
||||
|
|
@ -205,7 +206,7 @@ func (s *Server) listGeneric(ctx context.Context, subject, relation string, reso
|
|||
}
|
||||
|
||||
func (s *Server) listObjects(ctx context.Context, req *openfgav1.ListObjectsRequest) (*openfgav1.ListObjectsResponse, error) {
|
||||
fn := s.openfga.ListObjects
|
||||
fn := s.openFGAClient.ListObjects
|
||||
if s.cfg.UseStreamedListObjects {
|
||||
fn = s.streamedListObjects
|
||||
}
|
||||
|
|
@ -223,7 +224,7 @@ func (s *Server) listObjects(ctx context.Context, req *openfgav1.ListObjectsRequ
|
|||
return res, nil
|
||||
}
|
||||
|
||||
type listFn func(ctx context.Context, req *openfgav1.ListObjectsRequest) (*openfgav1.ListObjectsResponse, error)
|
||||
type listFn func(ctx context.Context, req *openfgav1.ListObjectsRequest, opts ...grpc.CallOption) (*openfgav1.ListObjectsResponse, error)
|
||||
|
||||
func (s *Server) listObjectCached(ctx context.Context, req *openfgav1.ListObjectsRequest, fn listFn) (*openfgav1.ListObjectsResponse, error) {
|
||||
ctx, span := s.tracer.Start(ctx, "server.listObjectCached")
|
||||
|
|
@ -247,7 +248,7 @@ func (s *Server) listObjectCached(ctx context.Context, req *openfgav1.ListObject
|
|||
return res, nil
|
||||
}
|
||||
|
||||
func (s *Server) streamedListObjects(ctx context.Context, req *openfgav1.ListObjectsRequest) (*openfgav1.ListObjectsResponse, error) {
|
||||
func (s *Server) streamedListObjects(ctx context.Context, req *openfgav1.ListObjectsRequest, opts ...grpc.CallOption) (*openfgav1.ListObjectsResponse, error) {
|
||||
ctx, span := s.tracer.Start(ctx, "server.streamedListObjects")
|
||||
defer span.End()
|
||||
|
||||
|
|
@ -261,7 +262,7 @@ func (s *Server) streamedListObjects(ctx context.Context, req *openfgav1.ListObj
|
|||
ContextualTuples: req.ContextualTuples,
|
||||
}
|
||||
|
||||
stream, err := s.openfgaClient.StreamedListObjects(ctx, r)
|
||||
stream, err := s.openFGAClient.StreamedListObjects(ctx, r, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,9 +9,181 @@ import (
|
|||
authzv1 "github.com/grafana/authlib/authz/proto/v1"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func testList(t *testing.T, server *Server) {
|
||||
func TestIntegrationServerList(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
server := setupOpenFGAServer(t)
|
||||
setup(t, server)
|
||||
newList := func(subject, group, resource, subresource string) *authzv1.ListRequest {
|
||||
return &authzv1.ListRequest{
|
||||
Namespace: namespace,
|
||||
Verb: utils.VerbList,
|
||||
Subject: subject,
|
||||
Group: group,
|
||||
Resource: resource,
|
||||
Subresource: subresource,
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("user:1 should list resource:dashboard.grafana.app/dashboards/1", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:1", dashboardGroup, dashboardResource, ""))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 1)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
assert.Equal(t, res.GetItems()[0], "1")
|
||||
})
|
||||
|
||||
t.Run("user:2 should be able to list all through group", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:2", dashboardGroup, dashboardResource, ""))
|
||||
require.NoError(t, err)
|
||||
assert.True(t, res.GetAll())
|
||||
assert.Len(t, res.GetItems(), 0)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
})
|
||||
|
||||
t.Run("user:3 should be able to list resource:dashboard.grafana.app/dashboards/1 with set relation", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:3", dashboardGroup, dashboardResource, ""))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, res.GetItems(), 1)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
assert.Equal(t, res.GetItems()[0], "1")
|
||||
})
|
||||
|
||||
t.Run("user:4 should be able to list all dashboard.grafana.app/dashboards in folder 1 and 3", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:4", dashboardGroup, dashboardResource, ""))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 0)
|
||||
assert.Len(t, res.GetFolders(), 2)
|
||||
|
||||
assert.Contains(t, res.GetFolders(), "1")
|
||||
assert.Contains(t, res.GetFolders(), "3")
|
||||
})
|
||||
|
||||
t.Run("user:5 should be list all dashboard.grafana.app/dashboards in folder 1 with set relation", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:5", dashboardGroup, dashboardResource, ""))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 0)
|
||||
assert.Len(t, res.GetFolders(), 1)
|
||||
assert.Equal(t, res.GetFolders()[0], "1")
|
||||
})
|
||||
|
||||
t.Run("user:6 should be able to list folder 1", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:6", folderGroup, folderResource, ""))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 1)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
assert.Equal(t, res.GetItems()[0], "1")
|
||||
})
|
||||
|
||||
t.Run("user:7 should be able to list all folders", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:7", folderGroup, folderResource, ""))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 0)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
assert.True(t, res.GetAll())
|
||||
})
|
||||
|
||||
t.Run("user:8 should be able to list resoruce:dashboard.grafana.app/dashboard in folder 6 and folder 5", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:8", dashboardGroup, dashboardResource, ""))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetFolders(), 2)
|
||||
|
||||
assert.Contains(t, res.GetFolders(), "5")
|
||||
assert.Contains(t, res.GetFolders(), "6")
|
||||
})
|
||||
|
||||
t.Run("user:10 should be able to get resoruce:dashboard.grafana.app/dashboard/status for 10 and 11", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:10", dashboardGroup, dashboardResource, statusSubresource))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
assert.Len(t, res.GetItems(), 2)
|
||||
|
||||
assert.Contains(t, res.GetItems(), "10")
|
||||
assert.Contains(t, res.GetItems(), "11")
|
||||
})
|
||||
|
||||
t.Run("user:11 should be able to list all resoruce:dashboard.grafana.app/dashboard/status ", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:11", dashboardGroup, dashboardResource, statusSubresource))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 0)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
assert.True(t, res.GetAll())
|
||||
})
|
||||
|
||||
t.Run("user:12 should be able to list all resoruce:dashboard.grafana.app/dashboard/status in folder 5 and 6", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:12", dashboardGroup, dashboardResource, statusSubresource))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 0)
|
||||
assert.Len(t, res.GetFolders(), 2)
|
||||
|
||||
assert.Contains(t, res.GetFolders(), "5")
|
||||
assert.Contains(t, res.GetFolders(), "6")
|
||||
})
|
||||
|
||||
t.Run("user:13 should be able to list all subresources in folder 5 and 6", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:13", folderGroup, folderResource, statusSubresource))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 2)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
|
||||
assert.Contains(t, res.GetItems(), "5")
|
||||
assert.Contains(t, res.GetItems(), "6")
|
||||
})
|
||||
|
||||
t.Run("user:14 should be able to list all subresources for team 1", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:14", teamGroup, teamResource, statusSubresource))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 1)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
|
||||
assert.Contains(t, res.GetItems(), "1")
|
||||
})
|
||||
|
||||
t.Run("user:15 should be able to list all subresources for user 1", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:15", userGroup, userResource, statusSubresource))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 1)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
|
||||
assert.Contains(t, res.GetItems(), "1")
|
||||
})
|
||||
|
||||
t.Run("user:16 should be able to list all subresources for service-account 1", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:16", serviceAccountGroup, serviceAccountResource, statusSubresource))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 1)
|
||||
assert.Len(t, res.GetFolders(), 0)
|
||||
|
||||
assert.Contains(t, res.GetItems(), "1")
|
||||
})
|
||||
|
||||
t.Run("user:17 should be able to list all dashboards in folder 4 and all subfolders", func(t *testing.T) {
|
||||
res, err := server.List(newContextWithNamespace(), newList("user:17", dashboardGroup, dashboardResource, ""))
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, res.GetItems(), 0)
|
||||
assert.Len(t, res.GetFolders(), 3)
|
||||
|
||||
assert.Contains(t, res.GetFolders(), "4")
|
||||
assert.Contains(t, res.GetFolders(), "5")
|
||||
assert.Contains(t, res.GetFolders(), "6")
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntegrationServerListStreaming(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
server := setupOpenFGAServer(t)
|
||||
setup(t, server)
|
||||
server.cfg.UseStreamedListObjects = true
|
||||
|
||||
t.Cleanup(func() {
|
||||
server.cfg.UseStreamedListObjects = false
|
||||
})
|
||||
|
||||
newList := func(subject, group, resource, subresource string) *authzv1.ListRequest {
|
||||
return &authzv1.ListRequest{
|
||||
Namespace: namespace,
|
||||
|
|
|
|||
|
|
@ -174,7 +174,7 @@ func (s *Server) writeTuples(ctx context.Context, store *storeInfo, writeTuples
|
|||
}
|
||||
}
|
||||
|
||||
_, err := s.openfga.Write(ctx, writeReq)
|
||||
_, err := s.openFGAClient.Write(ctx, writeReq)
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -110,7 +110,7 @@ func (s *Server) listFolderParents(ctx context.Context, store *storeInfo, folder
|
|||
defer span.End()
|
||||
|
||||
object := zanzana.NewFolderIdent(folderUID)
|
||||
resp, err := s.openfga.Read(ctx, &openfgav1.ReadRequest{
|
||||
resp, err := s.openFGAClient.Read(ctx, &openfgav1.ReadRequest{
|
||||
StoreId: store.ID,
|
||||
TupleKey: &openfgav1.ReadRequestTupleKey{
|
||||
Object: object,
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func setupMutateFolders(t *testing.T, srv *Server) *Server {
|
||||
|
|
@ -26,7 +27,10 @@ func setupMutateFolders(t *testing.T, srv *Server) *Server {
|
|||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
}
|
||||
|
||||
func testMutateFolders(t *testing.T, srv *Server) {
|
||||
func TestIntegrationServerMutateFolders(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
srv := setupOpenFGAServer(t)
|
||||
setupMutateFolders(t, srv)
|
||||
|
||||
t.Run("should create new folder parent relation", func(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -69,7 +69,7 @@ func (s *Server) getUserOrgRoleUpdateTuples(ctx context.Context, store *storeInf
|
|||
Object: zanzana.NewTupleEntry(zanzana.TypeRole, "", ""),
|
||||
},
|
||||
}
|
||||
res, err := s.openfga.Read(ctx, readReq)
|
||||
res, err := s.openFGAClient.Read(ctx, readReq)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func setupMutateOrgRoles(t *testing.T, srv *Server) *Server {
|
||||
|
|
@ -21,7 +22,10 @@ func setupMutateOrgRoles(t *testing.T, srv *Server) *Server {
|
|||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
}
|
||||
|
||||
func testMutateOrgRoles(t *testing.T, srv *Server) {
|
||||
func TestIntegrationServerMutateOrgRoles(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
srv := setupOpenFGAServer(t)
|
||||
setupMutateOrgRoles(t, srv)
|
||||
|
||||
t.Run("should update user org role and delete old role", func(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ import (
|
|||
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func setupMutateResourcePermissions(t *testing.T, srv *Server) *Server {
|
||||
|
|
@ -24,7 +25,10 @@ func setupMutateResourcePermissions(t *testing.T, srv *Server) *Server {
|
|||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
}
|
||||
|
||||
func testMutateResourcePermissions(t *testing.T, srv *Server) {
|
||||
func TestIntegrationServerMutateResourcePermissions(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
srv := setupOpenFGAServer(t)
|
||||
setupMutateResourcePermissions(t, srv)
|
||||
|
||||
t.Run("should create new resource permission", func(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func setupMutateRoleBindings(t *testing.T, srv *Server) *Server {
|
||||
|
|
@ -21,7 +22,10 @@ func setupMutateRoleBindings(t *testing.T, srv *Server) *Server {
|
|||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
}
|
||||
|
||||
func testMutateRoleBindings(t *testing.T, srv *Server) {
|
||||
func TestIntegrationServerMutateRoleBindings(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
srv := setupOpenFGAServer(t)
|
||||
setupMutateRoleBindings(t, srv)
|
||||
|
||||
t.Run("should update user role and delete old role", func(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func setupMutateRoles(t *testing.T, srv *Server) *Server {
|
||||
|
|
@ -21,7 +22,10 @@ func setupMutateRoles(t *testing.T, srv *Server) *Server {
|
|||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
}
|
||||
|
||||
func testMutateRoles(t *testing.T, srv *Server) {
|
||||
func TestIntegrationServerMutateRoles(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
srv := setupOpenFGAServer(t)
|
||||
setupMutateRoles(t, srv)
|
||||
|
||||
t.Run("should update role and delete old role permissions", func(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func setupMutateTeamBindings(t *testing.T, srv *Server) *Server {
|
||||
|
|
@ -21,7 +22,10 @@ func setupMutateTeamBindings(t *testing.T, srv *Server) *Server {
|
|||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
}
|
||||
|
||||
func testMutateTeamBindings(t *testing.T, srv *Server) {
|
||||
func TestIntegrationServerMutateTeamBindings(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
srv := setupOpenFGAServer(t)
|
||||
setupMutateTeamBindings(t, srv)
|
||||
|
||||
t.Run("should update user team binding and delete old team binding", func(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ import (
|
|||
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func setupMutate(t *testing.T, srv *Server) *Server {
|
||||
|
|
@ -31,7 +32,10 @@ func setupMutate(t *testing.T, srv *Server) *Server {
|
|||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
}
|
||||
|
||||
func testMutate(t *testing.T, srv *Server) {
|
||||
func TestIntegrationServerMutate(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
srv := setupOpenFGAServer(t)
|
||||
setupMutate(t, srv)
|
||||
|
||||
t.Run("should perform multiple mutate operations", func(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func setupFolders() []*openfgav1.TupleKey {
|
||||
|
|
@ -34,7 +35,10 @@ func setupQueryFolders(t *testing.T, srv *Server) *Server {
|
|||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
}
|
||||
|
||||
func testQueryFolders(t *testing.T, srv *Server) {
|
||||
func TestIntegrationServerQueryFolders(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
srv := setupOpenFGAServer(t)
|
||||
setupQueryFolders(t, srv)
|
||||
|
||||
t.Run("should query folder parents successfully", func(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ func (s *Server) read(ctx context.Context, req *authzextv1.ReadRequest) (*authze
|
|||
}
|
||||
}
|
||||
|
||||
res, err := s.openfga.Read(ctx, readReq)
|
||||
res, err := s.openFGAClient.Read(ctx, readReq)
|
||||
if err != nil {
|
||||
s.logger.Error("failed to perform openfga Read request", "error", errors.Unwrap(err))
|
||||
return nil, err
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ func (s *Server) getStoreInfo(ctx context.Context, namespace string) (*storeInfo
|
|||
}
|
||||
|
||||
func (s *Server) getOrCreateStore(ctx context.Context, namespace string) (*openfgav1.Store, error) {
|
||||
res, err := s.openfga.ListStores(ctx, &openfgav1.ListStoresRequest{Name: namespace})
|
||||
res, err := s.openFGAClient.ListStores(ctx, &openfgav1.ListStoresRequest{Name: namespace})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load zanzana stores: %w", err)
|
||||
}
|
||||
|
|
@ -51,7 +51,7 @@ func (s *Server) getOrCreateStore(ctx context.Context, namespace string) (*openf
|
|||
}
|
||||
}
|
||||
|
||||
createStoreRes, err := s.openfga.CreateStore(ctx, &openfgav1.CreateStoreRequest{Name: namespace})
|
||||
createStoreRes, err := s.openFGAClient.CreateStore(ctx, &openfgav1.CreateStoreRequest{Name: namespace})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -72,7 +72,7 @@ func (s *Server) loadModel(ctx context.Context, storeID string, modules []transf
|
|||
|
||||
// ReadAuthorizationModels returns authorization models for a store sorted in descending order of creation.
|
||||
// So with a pageSize of 1 we will get the latest model.
|
||||
res, err := s.openfga.ReadAuthorizationModels(ctx, &openfgav1.ReadAuthorizationModelsRequest{
|
||||
res, err := s.openFGAClient.ReadAuthorizationModels(ctx, &openfgav1.ReadAuthorizationModelsRequest{
|
||||
StoreId: storeID,
|
||||
PageSize: &wrapperspb.Int32Value{Value: 1},
|
||||
ContinuationToken: continuationToken,
|
||||
|
|
@ -89,7 +89,7 @@ func (s *Server) loadModel(ctx context.Context, storeID string, modules []transf
|
|||
}
|
||||
}
|
||||
|
||||
writeRes, err := s.openfga.WriteAuthorizationModel(ctx, &openfgav1.WriteAuthorizationModelRequest{
|
||||
writeRes, err := s.openFGAClient.WriteAuthorizationModel(ctx, &openfgav1.WriteAuthorizationModelRequest{
|
||||
StoreId: storeID,
|
||||
TypeDefinitions: model.GetTypeDefinitions(),
|
||||
SchemaVersion: model.GetSchemaVersion(),
|
||||
|
|
|
|||
|
|
@ -11,17 +11,13 @@ import (
|
|||
authnlib "github.com/grafana/authlib/authn"
|
||||
claims "github.com/grafana/authlib/types"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/store"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/tests/testsuite"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -79,12 +75,8 @@ func setup(t *testing.T, srv *Server) *Server {
|
|||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
testsuite.Run(m)
|
||||
}
|
||||
|
||||
func TestIntegrationServer(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
func setupOpenFGAServer(t *testing.T) *Server {
|
||||
t.Helper()
|
||||
|
||||
// Create a test-specific config to avoid migration conflicts
|
||||
cfg := setting.NewCfg()
|
||||
|
|
@ -99,67 +91,12 @@ func TestIntegrationServer(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
srv := setupOpenFGAServer(t, testStore, cfg)
|
||||
t.Run("test check", func(t *testing.T) {
|
||||
setup(t, srv)
|
||||
testCheck(t, srv)
|
||||
})
|
||||
|
||||
t.Run("test list", func(t *testing.T) {
|
||||
setup(t, srv)
|
||||
testList(t, srv)
|
||||
})
|
||||
|
||||
t.Run("test list streaming", func(t *testing.T) {
|
||||
setup(t, srv)
|
||||
srv.cfg.UseStreamedListObjects = true
|
||||
testList(t, srv)
|
||||
srv.cfg.UseStreamedListObjects = false
|
||||
})
|
||||
|
||||
t.Run("test mutate", func(t *testing.T) {
|
||||
testMutate(t, srv)
|
||||
})
|
||||
|
||||
t.Run("test mutate folders", func(t *testing.T) {
|
||||
testMutateFolders(t, srv)
|
||||
})
|
||||
|
||||
t.Run("test mutate resource permissions", func(t *testing.T) {
|
||||
testMutateResourcePermissions(t, srv)
|
||||
})
|
||||
|
||||
t.Run("test mutate org roles", func(t *testing.T) {
|
||||
testMutateOrgRoles(t, srv)
|
||||
})
|
||||
|
||||
t.Run("test query folders", func(t *testing.T) {
|
||||
testQueryFolders(t, srv)
|
||||
})
|
||||
|
||||
t.Run("test mutate role bindings", func(t *testing.T) {
|
||||
testMutateRoleBindings(t, srv)
|
||||
})
|
||||
|
||||
t.Run("test mutate team bindings", func(t *testing.T) {
|
||||
testMutateTeamBindings(t, srv)
|
||||
})
|
||||
|
||||
t.Run("test mutate roles", func(t *testing.T) {
|
||||
testMutateRoles(t, srv)
|
||||
})
|
||||
}
|
||||
|
||||
func setupOpenFGAServer(t *testing.T, testDB db.DB, cfg *setting.Cfg) *Server {
|
||||
t.Helper()
|
||||
|
||||
store, err := store.NewEmbeddedStore(cfg, testDB, log.NewNopLogger())
|
||||
require.NoError(t, err)
|
||||
openfga, err := NewOpenFGAServer(cfg.ZanzanaServer, store)
|
||||
srv, err := NewEmbeddedZanzanaServer(cfg, testStore, log.NewNopLogger(), tracing.NewNoopTracerService(), prometheus.NewRegistry())
|
||||
require.NoError(t, err)
|
||||
|
||||
srv, err := NewServer(cfg.ZanzanaServer, openfga, log.NewNopLogger(), tracing.NewNoopTracerService(), prometheus.NewRegistry())
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
srv.Close()
|
||||
})
|
||||
|
||||
return srv
|
||||
}
|
||||
|
|
@ -171,7 +108,7 @@ func setupOpenFGADatabase(t *testing.T, srv *Server, tuples []*openfgav1.TupleKe
|
|||
require.NoError(t, err)
|
||||
|
||||
// Clean up any existing store
|
||||
_, err = srv.openfga.DeleteStore(context.Background(), &openfgav1.DeleteStoreRequest{
|
||||
_, err = srv.openFGAClient.DeleteStore(context.Background(), &openfgav1.DeleteStoreRequest{
|
||||
StoreId: storeInf.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
|
@ -193,7 +130,7 @@ func setupOpenFGADatabase(t *testing.T, srv *Server, tuples []*openfgav1.TupleKe
|
|||
}
|
||||
|
||||
// Try to delete existing tuples (ignore errors if they don't exist)
|
||||
_, err = srv.openfga.Write(context.Background(), &openfgav1.WriteRequest{
|
||||
_, err = srv.openFGAClient.Write(context.Background(), &openfgav1.WriteRequest{
|
||||
StoreId: storeInf.ID,
|
||||
AuthorizationModelId: storeInf.ModelID,
|
||||
Deletes: &openfgav1.WriteRequestDeletes{
|
||||
|
|
@ -204,7 +141,7 @@ func setupOpenFGADatabase(t *testing.T, srv *Server, tuples []*openfgav1.TupleKe
|
|||
require.NoError(t, err)
|
||||
|
||||
// Now write the new tuples
|
||||
_, err = srv.openfga.Write(context.Background(), &openfgav1.WriteRequest{
|
||||
_, err = srv.openFGAClient.Write(context.Background(), &openfgav1.WriteRequest{
|
||||
StoreId: storeInf.ID,
|
||||
AuthorizationModelId: storeInf.ModelID,
|
||||
Writes: writes,
|
||||
|
|
|
|||
|
|
@ -73,7 +73,7 @@ func (s *Server) write(ctx context.Context, req *authzextv1.WriteRequest) (*auth
|
|||
}
|
||||
}
|
||||
|
||||
_, err = s.openfga.Write(ctx, writeReq)
|
||||
_, err = s.openFGAClient.Write(ctx, writeReq)
|
||||
if err != nil {
|
||||
s.logger.Error("failed to perform openfga Write request", "error", errors.Unwrap(err))
|
||||
return nil, err
|
||||
|
|
|
|||
|
|
@ -3,20 +3,17 @@ package server
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestWriteAuthorization(t *testing.T) {
|
||||
cfg := setting.NewCfg()
|
||||
testStore := sqlstore.NewTestStore(t, sqlstore.WithCfg(cfg))
|
||||
srv := setupOpenFGAServer(t, testStore, cfg)
|
||||
srv := setupOpenFGAServer(t)
|
||||
setup(t, srv)
|
||||
|
||||
req := &authzextv1.WriteRequest{
|
||||
|
|
|
|||
|
|
@ -2024,6 +2024,14 @@ var (
|
|||
Owner: grafanaDatavizSquad,
|
||||
Expression: "false",
|
||||
},
|
||||
{
|
||||
Name: "panelStyleActions",
|
||||
Description: "Enable style actions (copy/paste) in the panel editor",
|
||||
Stage: FeatureStageExperimental,
|
||||
FrontendOnly: true,
|
||||
Owner: grafanaDatavizSquad,
|
||||
Expression: "false",
|
||||
},
|
||||
{
|
||||
Name: "externalVizSuggestions",
|
||||
Description: "Enable all plugins to supply visualization suggestions (including 3rd party plugins)",
|
||||
|
|
|
|||
1
pkg/services/featuremgmt/toggles_gen.csv
generated
1
pkg/services/featuremgmt/toggles_gen.csv
generated
|
|
@ -253,6 +253,7 @@ Created,Name,Stage,Owner,requiresDevMode,RequiresRestart,FrontendOnly
|
|||
2025-10-20,newGauge,preview,@grafana/dataviz-squad,false,false,true
|
||||
2025-11-12,newVizSuggestions,preview,@grafana/dataviz-squad,false,false,true
|
||||
2025-12-02,externalVizSuggestions,experimental,@grafana/dataviz-squad,false,false,true
|
||||
2026-01-28,panelStyleActions,experimental,@grafana/dataviz-squad,false,false,true
|
||||
2025-12-18,heatmapRowsAxisOptions,experimental,@grafana/dataviz-squad,false,false,true
|
||||
2025-10-17,preventPanelChromeOverflow,preview,@grafana/grafana-frontend-platform,false,false,true
|
||||
2025-10-31,jaegerEnableGrpcEndpoint,experimental,@grafana/oss-big-tent,false,false,false
|
||||
|
|
|
|||
|
14
pkg/services/featuremgmt/toggles_gen.json
generated
14
pkg/services/featuremgmt/toggles_gen.json
generated
|
|
@ -3314,6 +3314,20 @@
|
|||
"expression": "false"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "panelStyleActions",
|
||||
"resourceVersion": "1769620237787",
|
||||
"creationTimestamp": "2026-01-28T17:10:37Z"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Enable style actions (copy/paste) in the panel editor",
|
||||
"stage": "experimental",
|
||||
"codeowner": "@grafana/dataviz-squad",
|
||||
"frontend": true,
|
||||
"expression": "false"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "panelTimeSettings",
|
||||
|
|
|
|||
|
|
@ -272,20 +272,20 @@ type (
|
|||
type MergeResult definition.MergeResult
|
||||
|
||||
func (m MergeResult) LogContext() []any {
|
||||
if len(m.RenamedReceivers) == 0 && len(m.RenamedTimeIntervals) == 0 {
|
||||
if len(m.Receivers) == 0 && len(m.TimeIntervals) == 0 {
|
||||
return nil
|
||||
}
|
||||
logCtx := make([]any, 0, 4)
|
||||
if len(m.RenamedReceivers) > 0 {
|
||||
if len(m.Receivers) > 0 {
|
||||
rcvBuilder := strings.Builder{}
|
||||
for from, to := range m.RenamedReceivers {
|
||||
for from, to := range m.Receivers {
|
||||
rcvBuilder.WriteString(fmt.Sprintf("'%s'->'%s',", from, to))
|
||||
}
|
||||
logCtx = append(logCtx, "renamedReceivers", fmt.Sprintf("[%s]", rcvBuilder.String()[0:rcvBuilder.Len()-1]))
|
||||
}
|
||||
if len(m.RenamedTimeIntervals) > 0 {
|
||||
if len(m.TimeIntervals) > 0 {
|
||||
intervalBuilder := strings.Builder{}
|
||||
for from, to := range m.RenamedTimeIntervals {
|
||||
for from, to := range m.TimeIntervals {
|
||||
intervalBuilder.WriteString(fmt.Sprintf("'%s'->'%s',", from, to))
|
||||
}
|
||||
logCtx = append(logCtx, "renamedTimeIntervals", fmt.Sprintf("[%s]", intervalBuilder.String()[0:intervalBuilder.Len()-1]))
|
||||
|
|
|
|||
|
|
@ -221,7 +221,7 @@ func (cfg *Cfg) readZanzanaSettings() {
|
|||
zs := ZanzanaServerSettings{}
|
||||
serverSec := cfg.SectionWithEnvOverrides("zanzana.server")
|
||||
|
||||
zs.OpenFGAHttpAddr = serverSec.Key("http_addr").MustString("127.0.0.1:8080")
|
||||
zs.OpenFGAHttpAddr = serverSec.Key("http_addr").MustString("")
|
||||
zs.ListObjectsDeadline = serverSec.Key("list_objects_deadline").MustDuration(3 * time.Second)
|
||||
zs.ListObjectsMaxResults = uint32(serverSec.Key("list_objects_max_results").MustUint(1000))
|
||||
zs.UseStreamedListObjects = serverSec.Key("use_streamed_list_objects").MustBool(false)
|
||||
|
|
|
|||
|
|
@ -4,9 +4,10 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/dashboard/legacy"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
authlib "github.com/grafana/authlib/types"
|
||||
|
||||
|
|
@ -34,10 +35,7 @@ type streamProvider interface {
|
|||
}
|
||||
|
||||
func buildCollectionSettings(opts legacy.MigrateOptions) resource.BulkSettings {
|
||||
settings := resource.BulkSettings{
|
||||
RebuildCollection: true,
|
||||
SkipValidation: true,
|
||||
}
|
||||
settings := resource.BulkSettings{SkipValidation: true}
|
||||
for _, res := range opts.Resources {
|
||||
key := buildResourceKey(res, opts.Namespace)
|
||||
if key != nil {
|
||||
|
|
|
|||
|
|
@ -22,13 +22,11 @@ import (
|
|||
)
|
||||
|
||||
const grpcMetaKeyCollection = "x-gf-batch-collection"
|
||||
const grpcMetaKeyRebuildCollection = "x-gf-batch-rebuild-collection"
|
||||
const grpcMetaKeySkipValidation = "x-gf-batch-skip-validation"
|
||||
|
||||
// Logged in trace.
|
||||
var metadataKeys = []string{
|
||||
grpcMetaKeyCollection,
|
||||
grpcMetaKeyRebuildCollection,
|
||||
grpcMetaKeySkipValidation,
|
||||
}
|
||||
|
||||
|
|
@ -64,10 +62,6 @@ type BulkSettings struct {
|
|||
// All requests will be within this namespace/group/resource
|
||||
Collection []*resourcepb.ResourceKey
|
||||
|
||||
// The batch will include everything from the collection
|
||||
// - all existing values will be removed/replaced if the batch completes successfully
|
||||
RebuildCollection bool
|
||||
|
||||
// The byte[] payload and folder has already been validated - no need to decode and verify
|
||||
SkipValidation bool
|
||||
}
|
||||
|
|
@ -79,9 +73,6 @@ func (x *BulkSettings) ToMD() metadata.MD {
|
|||
md[grpcMetaKeyCollection] = append(md[grpcMetaKeyCollection], SearchID(v))
|
||||
}
|
||||
}
|
||||
if x.RebuildCollection {
|
||||
md[grpcMetaKeyRebuildCollection] = []string{"true"}
|
||||
}
|
||||
if x.SkipValidation {
|
||||
md[grpcMetaKeySkipValidation] = []string{"true"}
|
||||
}
|
||||
|
|
@ -101,8 +92,6 @@ func NewBulkSettings(md metadata.MD) (BulkSettings, error) {
|
|||
}
|
||||
settings.Collection = append(settings.Collection, key)
|
||||
}
|
||||
case grpcMetaKeyRebuildCollection:
|
||||
settings.RebuildCollection = grpcMetaValueIsTrue(v)
|
||||
case grpcMetaKeySkipValidation:
|
||||
settings.SkipValidation = grpcMetaValueIsTrue(v)
|
||||
}
|
||||
|
|
@ -187,48 +176,39 @@ func (s *server) BulkProcess(stream resourcepb.BulkStore_BulkProcessServer) erro
|
|||
}
|
||||
}
|
||||
|
||||
if settings.RebuildCollection {
|
||||
for _, k := range settings.Collection {
|
||||
// Can we delete the whole collection
|
||||
rsp, err := s.access.Check(ctx, user, authlib.CheckRequest{
|
||||
Namespace: k.Namespace,
|
||||
Group: k.Group,
|
||||
Resource: k.Resource,
|
||||
Verb: utils.VerbDeleteCollection,
|
||||
}, "")
|
||||
if err != nil || !rsp.Allowed {
|
||||
return sendAndClose(&resourcepb.BulkResponse{
|
||||
Error: &resourcepb.ErrorResult{
|
||||
Message: fmt.Sprintf("Requester must be able to: %s", utils.VerbDeleteCollection),
|
||||
Code: http.StatusForbidden,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// This will be called for each request -- with the folder ID
|
||||
//nolint:staticcheck // SA1019: Compile is deprecated but BatchCheck is not yet fully implemented
|
||||
runner.checker[NSGR(k)], _, err = s.access.Compile(ctx, user, authlib.ListRequest{
|
||||
Namespace: k.Namespace,
|
||||
Group: k.Group,
|
||||
Resource: k.Resource,
|
||||
Verb: utils.VerbCreate,
|
||||
for _, k := range settings.Collection {
|
||||
// Can we delete the whole collection
|
||||
rsp, err := s.access.Check(ctx, user, authlib.CheckRequest{
|
||||
Namespace: k.Namespace,
|
||||
Group: k.Group,
|
||||
Resource: k.Resource,
|
||||
Verb: utils.VerbDeleteCollection,
|
||||
}, "")
|
||||
if err != nil || !rsp.Allowed {
|
||||
return sendAndClose(&resourcepb.BulkResponse{
|
||||
Error: &resourcepb.ErrorResult{
|
||||
Message: fmt.Sprintf("Requester must be able to: %s", utils.VerbDeleteCollection),
|
||||
Code: http.StatusForbidden,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return sendAndClose(&resourcepb.BulkResponse{
|
||||
Error: &resourcepb.ErrorResult{
|
||||
Message: "Unable to check `create` permission",
|
||||
Code: http.StatusForbidden,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return sendAndClose(&resourcepb.BulkResponse{
|
||||
Error: &resourcepb.ErrorResult{
|
||||
Message: "Bulk currently only supports RebuildCollection",
|
||||
Code: http.StatusBadRequest,
|
||||
},
|
||||
|
||||
// This will be called for each request -- with the folder ID
|
||||
//nolint:staticcheck // SA1019: Compile is deprecated but BatchCheck is not yet fully implemented
|
||||
runner.checker[NSGR(k)], _, err = s.access.Compile(ctx, user, authlib.ListRequest{
|
||||
Namespace: k.Namespace,
|
||||
Group: k.Group,
|
||||
Resource: k.Resource,
|
||||
Verb: utils.VerbCreate,
|
||||
})
|
||||
if err != nil {
|
||||
return sendAndClose(&resourcepb.BulkResponse{
|
||||
Error: &resourcepb.ErrorResult{
|
||||
Message: "Unable to check `create` permission",
|
||||
Code: http.StatusForbidden,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
backend, ok := s.backend.(BulkProcessingBackend)
|
||||
|
|
|
|||
|
|
@ -1322,67 +1322,57 @@ func (b *kvStorageBackend) ProcessBulk(ctx context.Context, setting BulkSettings
|
|||
summaries := make(map[string]*resourcepb.BulkResponse_Summary, len(setting.Collection))
|
||||
rsp := &resourcepb.BulkResponse{}
|
||||
|
||||
if setting.RebuildCollection {
|
||||
for _, key := range setting.Collection {
|
||||
events := make([]string, 0)
|
||||
for evtKeyStr, err := range b.eventStore.ListKeysSince(ctx, 1, SortOrderAsc) {
|
||||
if err != nil {
|
||||
b.log.Error("failed to list event: %s", err)
|
||||
return rsp
|
||||
}
|
||||
|
||||
evtKey, err := ParseEventKey(evtKeyStr)
|
||||
if err != nil {
|
||||
b.log.Error("error parsing event key: %s", err)
|
||||
return rsp
|
||||
}
|
||||
|
||||
if evtKey.Group != key.Group || evtKey.Resource != key.Resource || evtKey.Namespace != key.Namespace {
|
||||
continue
|
||||
}
|
||||
|
||||
events = append(events, evtKeyStr)
|
||||
}
|
||||
|
||||
if err := b.eventStore.batchDelete(ctx, events); err != nil {
|
||||
b.log.Error("failed to delete events: %s", err)
|
||||
for _, key := range setting.Collection {
|
||||
events := make([]string, 0)
|
||||
for evtKeyStr, err := range b.eventStore.ListKeysSince(ctx, 1, SortOrderAsc) {
|
||||
if err != nil {
|
||||
b.log.Error("failed to list event: %s", err)
|
||||
return rsp
|
||||
}
|
||||
|
||||
historyKeys := make([]DataKey, 0)
|
||||
|
||||
for dataKey, err := range b.dataStore.Keys(ctx, ListRequestKey{
|
||||
Namespace: key.Namespace,
|
||||
Group: key.Group,
|
||||
Resource: key.Resource,
|
||||
}, SortOrderAsc) {
|
||||
if err != nil {
|
||||
b.log.Error("failed to list collection before delete: %s", err)
|
||||
return rsp
|
||||
}
|
||||
|
||||
historyKeys = append(historyKeys, dataKey)
|
||||
}
|
||||
|
||||
previousCount := int64(len(historyKeys))
|
||||
if err := b.dataStore.batchDelete(ctx, historyKeys); err != nil {
|
||||
b.log.Error("failed to delete collection: %s", err)
|
||||
evtKey, err := ParseEventKey(evtKeyStr)
|
||||
if err != nil {
|
||||
b.log.Error("error parsing event key: %s", err)
|
||||
return rsp
|
||||
}
|
||||
summaries[NSGR(key)] = &resourcepb.BulkResponse_Summary{
|
||||
Namespace: key.Namespace,
|
||||
Group: key.Group,
|
||||
Resource: key.Resource,
|
||||
PreviousCount: previousCount,
|
||||
|
||||
if evtKey.Group != key.Group || evtKey.Resource != key.Resource || evtKey.Namespace != key.Namespace {
|
||||
continue
|
||||
}
|
||||
|
||||
events = append(events, evtKeyStr)
|
||||
}
|
||||
} else {
|
||||
for _, key := range setting.Collection {
|
||||
summaries[NSGR(key)] = &resourcepb.BulkResponse_Summary{
|
||||
Namespace: key.Namespace,
|
||||
Group: key.Group,
|
||||
Resource: key.Resource,
|
||||
|
||||
if err := b.eventStore.batchDelete(ctx, events); err != nil {
|
||||
b.log.Error("failed to delete events: %s", err)
|
||||
return rsp
|
||||
}
|
||||
|
||||
historyKeys := make([]DataKey, 0)
|
||||
|
||||
for dataKey, err := range b.dataStore.Keys(ctx, ListRequestKey{
|
||||
Namespace: key.Namespace,
|
||||
Group: key.Group,
|
||||
Resource: key.Resource,
|
||||
}, SortOrderAsc) {
|
||||
if err != nil {
|
||||
b.log.Error("failed to list collection before delete: %s", err)
|
||||
return rsp
|
||||
}
|
||||
|
||||
historyKeys = append(historyKeys, dataKey)
|
||||
}
|
||||
|
||||
previousCount := int64(len(historyKeys))
|
||||
if err := b.dataStore.batchDelete(ctx, historyKeys); err != nil {
|
||||
b.log.Error("failed to delete collection: %s", err)
|
||||
return rsp
|
||||
}
|
||||
summaries[NSGR(key)] = &resourcepb.BulkResponse_Summary{
|
||||
Namespace: key.Namespace,
|
||||
Group: key.Group,
|
||||
Resource: key.Resource,
|
||||
PreviousCount: previousCount,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -225,23 +225,13 @@ func (b *backend) processBulkWithTx(ctx context.Context, tx db.Tx, setting resou
|
|||
summaries := make(map[string]*resourcepb.BulkResponse_Summary, len(setting.Collection))
|
||||
|
||||
// First clear everything in the transaction
|
||||
if setting.RebuildCollection {
|
||||
for _, key := range setting.Collection {
|
||||
summary, err := bulk.deleteCollection(key)
|
||||
if err != nil {
|
||||
return rollbackWithError(err)
|
||||
}
|
||||
summaries[resource.NSGR(key)] = summary
|
||||
rsp.Summary = append(rsp.Summary, summary)
|
||||
}
|
||||
} else {
|
||||
for _, key := range setting.Collection {
|
||||
summaries[resource.NSGR(key)] = &resourcepb.BulkResponse_Summary{
|
||||
Namespace: key.Namespace,
|
||||
Group: key.Group,
|
||||
Resource: key.Resource,
|
||||
}
|
||||
for _, key := range setting.Collection {
|
||||
summary, err := bulk.deleteCollection(key)
|
||||
if err != nil {
|
||||
return rollbackWithError(err)
|
||||
}
|
||||
summaries[resource.NSGR(key)] = summary
|
||||
rsp.Summary = append(rsp.Summary, summary)
|
||||
}
|
||||
|
||||
obj := &unstructured.Unstructured{}
|
||||
|
|
|
|||
|
|
@ -1501,10 +1501,7 @@ func runTestIntegrationGetResourceLastImportTime(t *testing.T, backend resource.
|
|||
},
|
||||
}
|
||||
|
||||
resp := bulk.ProcessBulk(ctx, resource.BulkSettings{
|
||||
Collection: collections,
|
||||
RebuildCollection: true,
|
||||
}, toBulkIterator(bulkRequests))
|
||||
resp := bulk.ProcessBulk(ctx, resource.BulkSettings{Collection: collections}, toBulkIterator(bulkRequests))
|
||||
require.Nil(t, resp.Error)
|
||||
|
||||
result := collectLastImportedTimes(t, backend, ctx)
|
||||
|
|
@ -1540,10 +1537,7 @@ func runTestIntegrationGetResourceLastImportTime(t *testing.T, backend resource.
|
|||
Value: nil,
|
||||
}}
|
||||
|
||||
resp1 := bulk.ProcessBulk(ctx, resource.BulkSettings{
|
||||
Collection: collections1,
|
||||
RebuildCollection: true,
|
||||
}, toBulkIterator(bulkRequests1))
|
||||
resp1 := bulk.ProcessBulk(ctx, resource.BulkSettings{Collection: collections1}, toBulkIterator(bulkRequests1))
|
||||
require.Nil(t, resp1.Error)
|
||||
|
||||
firstImport := time.Now()
|
||||
|
|
@ -1571,10 +1565,7 @@ func runTestIntegrationGetResourceLastImportTime(t *testing.T, backend resource.
|
|||
Value: nil,
|
||||
}}
|
||||
|
||||
resp2 := bulk.ProcessBulk(ctx, resource.BulkSettings{
|
||||
Collection: collections2,
|
||||
RebuildCollection: false,
|
||||
}, toBulkIterator(bulkRequests2))
|
||||
resp2 := bulk.ProcessBulk(ctx, resource.BulkSettings{Collection: collections2}, toBulkIterator(bulkRequests2))
|
||||
require.Nil(t, resp2.Error)
|
||||
|
||||
secondImport := time.Now()
|
||||
|
|
|
|||
|
|
@ -382,6 +382,9 @@ func (db *mysql) GetColumns(tableName string) ([]string, map[string]*core.Column
|
|||
if colType == "DOUBLE UNSIGNED" {
|
||||
colType = "DOUBLE"
|
||||
}
|
||||
if colType == "BIGINT UNSIGNED" {
|
||||
colType = "BIGINT"
|
||||
}
|
||||
col.Length = len1
|
||||
col.Length2 = len2
|
||||
if _, ok := core.SqlTypes[colType]; ok {
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ export const DEFAULT_ROW_HEIGHT = 250;
|
|||
export const MIN_PANEL_HEIGHT = GRID_CELL_HEIGHT * 3;
|
||||
|
||||
export const LS_PANEL_COPY_KEY = 'panel-copy';
|
||||
export const LS_STYLES_COPY_KEY = 'styles-copy';
|
||||
export const LS_ROW_COPY_KEY = 'row-copy';
|
||||
export const LS_TAB_COPY_KEY = 'tab-copy';
|
||||
export const PANEL_BORDER = 2;
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ import {
|
|||
import { Dashboard, DashboardCursorSync, LibraryPanel } from '@grafana/schema';
|
||||
import { Spec as DashboardV2Spec } from '@grafana/schema/dist/esm/schema/dashboard/v2';
|
||||
import { appEvents } from 'app/core/app_events';
|
||||
import { LS_PANEL_COPY_KEY } from 'app/core/constants';
|
||||
import { LS_PANEL_COPY_KEY, LS_STYLES_COPY_KEY } from 'app/core/constants';
|
||||
import { AnnoKeyManagerKind, ManagerKind } from 'app/features/apiserver/types';
|
||||
import { getDashboardSrv } from 'app/features/dashboard/services/DashboardSrv';
|
||||
import { DecoratedRevisionModel } from 'app/features/dashboard/types/revisionModels';
|
||||
|
|
@ -39,6 +39,7 @@ import { createWorker } from '../saving/createDetectChangesWorker';
|
|||
import { buildGridItemForPanel, transformSaveModelToScene } from '../serialization/transformSaveModelToScene';
|
||||
import { getCloneKey } from '../utils/clone';
|
||||
import { dashboardSceneGraph } from '../utils/dashboardSceneGraph';
|
||||
import { DashboardInteractions } from '../utils/interactions';
|
||||
import { findVizPanelByKey, getLibraryPanelBehavior, isLibraryPanel } from '../utils/utils';
|
||||
import * as utils from '../utils/utils';
|
||||
|
||||
|
|
@ -632,6 +633,196 @@ describe('DashboardScene', () => {
|
|||
expect(store.exists(LS_PANEL_COPY_KEY)).toBe(false);
|
||||
});
|
||||
|
||||
describe('Copy/Paste panel styles', () => {
|
||||
const createTimeseriesPanel = () => {
|
||||
return new VizPanel({
|
||||
title: 'Timeseries Panel',
|
||||
key: `panel-timeseries-${Math.random()}`,
|
||||
pluginId: 'timeseries',
|
||||
fieldConfig: {
|
||||
defaults: {
|
||||
color: { mode: 'palette-classic' },
|
||||
custom: {
|
||||
lineWidth: 1,
|
||||
fillOpacity: 10,
|
||||
},
|
||||
},
|
||||
overrides: [],
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
store.delete(LS_STYLES_COPY_KEY);
|
||||
config.featureToggles.panelStyleActions = true;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
config.featureToggles.panelStyleActions = false;
|
||||
});
|
||||
|
||||
it('Should copy panel styles when feature flag is enabled', () => {
|
||||
const spy = jest.spyOn(DashboardInteractions, 'panelStylesMenuClicked');
|
||||
const timeseriesPanel = createTimeseriesPanel();
|
||||
|
||||
scene.copyPanelStyles(timeseriesPanel);
|
||||
|
||||
expect(store.exists(LS_STYLES_COPY_KEY)).toBe(true);
|
||||
const stored = JSON.parse(store.get(LS_STYLES_COPY_KEY) || '{}');
|
||||
expect(stored.panelType).toBe('timeseries');
|
||||
expect(stored.styles).toBeDefined();
|
||||
expect(spy).not.toHaveBeenCalled(); // Analytics only called from menu
|
||||
});
|
||||
|
||||
it('Should not copy panel styles when feature flag is disabled', () => {
|
||||
config.featureToggles.panelStyleActions = false;
|
||||
const timeseriesPanel = createTimeseriesPanel();
|
||||
|
||||
scene.copyPanelStyles(timeseriesPanel);
|
||||
|
||||
expect(store.exists(LS_STYLES_COPY_KEY)).toBe(false);
|
||||
});
|
||||
|
||||
it('Should not copy styles for non-timeseries panels', () => {
|
||||
const vizPanel = findVizPanelByKey(scene, 'panel-1')!;
|
||||
scene.copyPanelStyles(vizPanel);
|
||||
|
||||
expect(store.exists(LS_STYLES_COPY_KEY)).toBe(false);
|
||||
});
|
||||
|
||||
it('Should return false for hasPanelStylesToPaste when no styles copied', () => {
|
||||
expect(DashboardScene.hasPanelStylesToPaste('timeseries')).toBe(false);
|
||||
});
|
||||
|
||||
it('Should return false for hasPanelStylesToPaste when feature flag is disabled', () => {
|
||||
store.set(LS_STYLES_COPY_KEY, JSON.stringify({ panelType: 'timeseries', styles: {} }));
|
||||
config.featureToggles.panelStyleActions = false;
|
||||
|
||||
expect(DashboardScene.hasPanelStylesToPaste('timeseries')).toBe(false);
|
||||
});
|
||||
|
||||
it('Should return true for hasPanelStylesToPaste when styles exist for matching panel type', () => {
|
||||
store.set(LS_STYLES_COPY_KEY, JSON.stringify({ panelType: 'timeseries', styles: {} }));
|
||||
|
||||
expect(DashboardScene.hasPanelStylesToPaste('timeseries')).toBe(true);
|
||||
});
|
||||
|
||||
it('Should return false for hasPanelStylesToPaste for different panel type', () => {
|
||||
store.set(LS_STYLES_COPY_KEY, JSON.stringify({ panelType: 'timeseries', styles: {} }));
|
||||
|
||||
expect(DashboardScene.hasPanelStylesToPaste('table')).toBe(false);
|
||||
});
|
||||
|
||||
it('Should paste panel styles when feature flag is enabled', () => {
|
||||
const spy = jest.spyOn(DashboardInteractions, 'panelStylesMenuClicked');
|
||||
const timeseriesPanel = createTimeseriesPanel();
|
||||
const mockOnFieldConfigChange = jest.fn();
|
||||
timeseriesPanel.onFieldConfigChange = mockOnFieldConfigChange;
|
||||
|
||||
const styles = {
|
||||
panelType: 'timeseries',
|
||||
styles: {
|
||||
fieldConfig: {
|
||||
defaults: {
|
||||
color: { mode: 'palette-classic' },
|
||||
custom: {
|
||||
lineWidth: 2,
|
||||
fillOpacity: 10,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
store.set(LS_STYLES_COPY_KEY, JSON.stringify(styles));
|
||||
|
||||
scene.pastePanelStyles(timeseriesPanel);
|
||||
|
||||
expect(mockOnFieldConfigChange).toHaveBeenCalled();
|
||||
expect(store.exists(LS_STYLES_COPY_KEY)).toBe(true);
|
||||
expect(spy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('Should not paste panel styles when feature flag is disabled', () => {
|
||||
config.featureToggles.panelStyleActions = false;
|
||||
const timeseriesPanel = createTimeseriesPanel();
|
||||
const mockOnFieldConfigChange = jest.fn();
|
||||
timeseriesPanel.onFieldConfigChange = mockOnFieldConfigChange;
|
||||
|
||||
const styles = {
|
||||
panelType: 'timeseries',
|
||||
styles: { fieldConfig: { defaults: {} } },
|
||||
};
|
||||
store.set(LS_STYLES_COPY_KEY, JSON.stringify(styles));
|
||||
|
||||
scene.pastePanelStyles(timeseriesPanel);
|
||||
|
||||
expect(mockOnFieldConfigChange).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('Should not paste styles when no styles are copied', () => {
|
||||
const timeseriesPanel = createTimeseriesPanel();
|
||||
const mockOnFieldConfigChange = jest.fn();
|
||||
timeseriesPanel.onFieldConfigChange = mockOnFieldConfigChange;
|
||||
|
||||
scene.pastePanelStyles(timeseriesPanel);
|
||||
|
||||
expect(mockOnFieldConfigChange).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('Should not paste styles to different panel type', () => {
|
||||
const spy = jest.spyOn(DashboardInteractions, 'panelStylesMenuClicked');
|
||||
const timeseriesPanel = createTimeseriesPanel();
|
||||
const mockOnFieldConfigChange = jest.fn();
|
||||
timeseriesPanel.onFieldConfigChange = mockOnFieldConfigChange;
|
||||
|
||||
const styles = {
|
||||
panelType: 'table',
|
||||
styles: { fieldConfig: { defaults: {} } },
|
||||
};
|
||||
store.set(LS_STYLES_COPY_KEY, JSON.stringify(styles));
|
||||
|
||||
scene.pastePanelStyles(timeseriesPanel);
|
||||
|
||||
expect(mockOnFieldConfigChange).not.toHaveBeenCalled();
|
||||
expect(spy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('Should allow pasting styles multiple times', () => {
|
||||
const spy = jest.spyOn(DashboardInteractions, 'panelStylesMenuClicked');
|
||||
const timeseriesPanel1 = createTimeseriesPanel();
|
||||
const timeseriesPanel2 = createTimeseriesPanel();
|
||||
const mockOnFieldConfigChange1 = jest.fn();
|
||||
const mockOnFieldConfigChange2 = jest.fn();
|
||||
timeseriesPanel1.onFieldConfigChange = mockOnFieldConfigChange1;
|
||||
timeseriesPanel2.onFieldConfigChange = mockOnFieldConfigChange2;
|
||||
|
||||
const styles = {
|
||||
panelType: 'timeseries',
|
||||
styles: { fieldConfig: { defaults: { custom: { lineWidth: 3 } } } },
|
||||
};
|
||||
store.set(LS_STYLES_COPY_KEY, JSON.stringify(styles));
|
||||
|
||||
scene.pastePanelStyles(timeseriesPanel1);
|
||||
expect(mockOnFieldConfigChange1).toHaveBeenCalled();
|
||||
expect(store.exists(LS_STYLES_COPY_KEY)).toBe(true);
|
||||
|
||||
scene.pastePanelStyles(timeseriesPanel2);
|
||||
expect(mockOnFieldConfigChange2).toHaveBeenCalled();
|
||||
expect(store.exists(LS_STYLES_COPY_KEY)).toBe(true);
|
||||
expect(spy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('Should report analytics on paste error', () => {
|
||||
const spy = jest.spyOn(DashboardInteractions, 'panelStylesMenuClicked');
|
||||
jest.spyOn(console, 'error').mockImplementation();
|
||||
|
||||
store.set(LS_STYLES_COPY_KEY, 'invalid json');
|
||||
scene.pastePanelStyles(createTimeseriesPanel());
|
||||
|
||||
expect(spy).toHaveBeenCalledWith('paste', 'timeseries', expect.any(Number), true);
|
||||
});
|
||||
});
|
||||
|
||||
it('Should unlink a library panel', () => {
|
||||
const libPanel = new VizPanel({
|
||||
title: 'Panel B',
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
import * as H from 'history';
|
||||
|
||||
import { CoreApp, DataQueryRequest, locationUtil, NavIndex, NavModelItem, store } from '@grafana/data';
|
||||
import { CoreApp, DataQueryRequest, FieldConfig, locationUtil, NavIndex, NavModelItem, store } from '@grafana/data';
|
||||
import { t } from '@grafana/i18n';
|
||||
import { config, locationService, RefreshEvent } from '@grafana/runtime';
|
||||
import {
|
||||
|
|
@ -19,7 +19,7 @@ import { Dashboard, DashboardLink, LibraryPanel } from '@grafana/schema';
|
|||
import { Spec as DashboardV2Spec } from '@grafana/schema/dist/esm/schema/dashboard/v2';
|
||||
import { appEvents } from 'app/core/app_events';
|
||||
import { ScrollRefElement } from 'app/core/components/NativeScrollbar';
|
||||
import { LS_PANEL_COPY_KEY } from 'app/core/constants';
|
||||
import { LS_PANEL_COPY_KEY, LS_STYLES_COPY_KEY } from 'app/core/constants';
|
||||
import { getNavModel } from 'app/core/selectors/navModel';
|
||||
import { sortedDeepCloneWithoutNulls } from 'app/core/utils/object';
|
||||
import { getDashboardAPI } from 'app/features/dashboard/api/dashboard_api';
|
||||
|
|
@ -34,6 +34,7 @@ import { DecoratedRevisionModel } from 'app/features/dashboard/types/revisionMod
|
|||
import { dashboardWatcher } from 'app/features/live/dashboard/dashboardWatcher';
|
||||
import { DashboardJson } from 'app/features/manage-dashboards/types';
|
||||
import { VariablesChanged } from 'app/features/variables/types';
|
||||
import { defaultGraphStyleConfig } from 'app/plugins/panel/timeseries/config';
|
||||
import { DashboardDTO, DashboardMeta, SaveDashboardResponseDTO } from 'app/types/dashboard';
|
||||
import { ShowConfirmModalEvent } from 'app/types/events';
|
||||
|
||||
|
|
@ -69,6 +70,7 @@ import { isRepeatCloneOrChildOf } from '../utils/clone';
|
|||
import { dashboardSceneGraph } from '../utils/dashboardSceneGraph';
|
||||
import { djb2Hash } from '../utils/djb2Hash';
|
||||
import { getDashboardUrl } from '../utils/getDashboardUrl';
|
||||
import { DashboardInteractions } from '../utils/interactions';
|
||||
import {
|
||||
getClosestVizPanel,
|
||||
getDashboardSceneFor,
|
||||
|
|
@ -98,6 +100,15 @@ export const PERSISTED_PROPS = ['title', 'description', 'tags', 'editable', 'gra
|
|||
export const PANEL_SEARCH_VAR = 'systemPanelFilterVar';
|
||||
export const PANELS_PER_ROW_VAR = 'systemDynamicRowSizeVar';
|
||||
|
||||
type PanelStyles = {
|
||||
fieldConfig?: { defaults: Partial<FieldConfig> };
|
||||
};
|
||||
|
||||
type CopiedPanelStyles = {
|
||||
panelType: string;
|
||||
styles: PanelStyles;
|
||||
};
|
||||
|
||||
export interface DashboardSceneState extends SceneObjectState {
|
||||
/** The title */
|
||||
title: string;
|
||||
|
|
@ -651,6 +662,145 @@ export class DashboardScene extends SceneObjectBase<DashboardSceneState> impleme
|
|||
store.delete(LS_PANEL_COPY_KEY);
|
||||
}
|
||||
|
||||
/**
|
||||
* Hardcoded to Timeseries for this PoC
|
||||
* @internal
|
||||
*/
|
||||
private static extractPanelStyles(panel: VizPanel): PanelStyles {
|
||||
const styles: PanelStyles = {};
|
||||
|
||||
if (!panel.state.fieldConfig?.defaults) {
|
||||
return styles;
|
||||
}
|
||||
|
||||
styles.fieldConfig = { defaults: {} };
|
||||
|
||||
const defaults = styles.fieldConfig.defaults;
|
||||
const panelDefaults = panel.state.fieldConfig.defaults;
|
||||
|
||||
// default props (color)
|
||||
if (defaultGraphStyleConfig.fieldConfig?.defaultsProps) {
|
||||
for (const key of defaultGraphStyleConfig.fieldConfig.defaultsProps) {
|
||||
const value = panelDefaults[key];
|
||||
if (value !== undefined) {
|
||||
defaults[key] = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// custom props (lineWidth, fillOpacity, etc.)
|
||||
if (panel.state.fieldConfig.defaults.custom && defaultGraphStyleConfig.fieldConfig?.defaults) {
|
||||
const customDefaults: Record<string, unknown> = {};
|
||||
const panelCustom: Record<string, unknown> = panel.state.fieldConfig.defaults.custom;
|
||||
|
||||
for (const key of defaultGraphStyleConfig.fieldConfig.defaults) {
|
||||
const value = panelCustom[key];
|
||||
if (value !== undefined) {
|
||||
customDefaults[key] = value;
|
||||
}
|
||||
}
|
||||
|
||||
defaults.custom = customDefaults;
|
||||
}
|
||||
|
||||
return styles;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
public copyPanelStyles(vizPanel: VizPanel) {
|
||||
if (!config.featureToggles.panelStyleActions) {
|
||||
return;
|
||||
}
|
||||
|
||||
const panelType = vizPanel.state.pluginId;
|
||||
|
||||
if (panelType !== 'timeseries') {
|
||||
return;
|
||||
}
|
||||
|
||||
const stylesToCopy: CopiedPanelStyles = {
|
||||
panelType,
|
||||
styles: DashboardScene.extractPanelStyles(vizPanel),
|
||||
};
|
||||
|
||||
store.set(LS_STYLES_COPY_KEY, JSON.stringify(stylesToCopy));
|
||||
appEvents.emit('alert-success', ['Panel styles copied.']);
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
public static hasPanelStylesToPaste(panelType: string): boolean {
|
||||
if (!config.featureToggles.panelStyleActions) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const stylesJson = store.get(LS_STYLES_COPY_KEY);
|
||||
if (!stylesJson) {
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
const stylesCopy: CopiedPanelStyles = JSON.parse(stylesJson);
|
||||
return stylesCopy.panelType === panelType;
|
||||
} catch (e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
public pastePanelStyles(vizPanel: VizPanel) {
|
||||
if (!config.featureToggles.panelStyleActions) {
|
||||
return;
|
||||
}
|
||||
|
||||
const stylesJson = store.get(LS_STYLES_COPY_KEY);
|
||||
if (!stylesJson) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const stylesCopy: CopiedPanelStyles = JSON.parse(stylesJson);
|
||||
|
||||
const panelType = vizPanel.state.pluginId;
|
||||
|
||||
if (stylesCopy.panelType !== panelType) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!stylesCopy.styles.fieldConfig?.defaults) {
|
||||
return;
|
||||
}
|
||||
|
||||
const newDefaults = {
|
||||
...vizPanel.state.fieldConfig?.defaults,
|
||||
...stylesCopy.styles.fieldConfig.defaults,
|
||||
};
|
||||
|
||||
if (stylesCopy.styles.fieldConfig.defaults.custom) {
|
||||
newDefaults.custom = {
|
||||
...vizPanel.state.fieldConfig?.defaults?.custom,
|
||||
...stylesCopy.styles.fieldConfig.defaults.custom,
|
||||
};
|
||||
}
|
||||
|
||||
const newFieldConfig = {
|
||||
...vizPanel.state.fieldConfig,
|
||||
defaults: newDefaults,
|
||||
};
|
||||
vizPanel.onFieldConfigChange(newFieldConfig);
|
||||
|
||||
appEvents.emit('alert-success', ['Panel styles applied.']);
|
||||
} catch (e) {
|
||||
console.error('Error pasting panel styles:', e);
|
||||
appEvents.emit('alert-error', ['Error pasting panel styles.']);
|
||||
DashboardInteractions.panelStylesMenuClicked(
|
||||
'paste',
|
||||
vizPanel.state.pluginId,
|
||||
getPanelIdForVizPanel(vizPanel) ?? -1,
|
||||
true
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
public removePanel(panel: VizPanel) {
|
||||
getLayoutManagerFor(panel).removePanel?.(panel);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import {
|
|||
PluginExtensionPanelContext,
|
||||
PluginExtensionTypes,
|
||||
getDefaultTimeRange,
|
||||
store,
|
||||
toDataFrame,
|
||||
urlUtil,
|
||||
} from '@grafana/data';
|
||||
|
|
@ -18,6 +19,7 @@ import {
|
|||
VizPanel,
|
||||
VizPanelMenu,
|
||||
} from '@grafana/scenes';
|
||||
import { LS_STYLES_COPY_KEY } from 'app/core/constants';
|
||||
import { contextSrv } from 'app/core/services/context_srv';
|
||||
import { GetExploreUrlArguments } from 'app/core/utils/explore';
|
||||
import { grantUserPermissions } from 'app/features/alerting/unified/mocks';
|
||||
|
|
@ -26,6 +28,7 @@ import * as storeModule from 'app/store/store';
|
|||
import { AccessControlAction } from 'app/types/accessControl';
|
||||
|
||||
import { buildPanelEditScene } from '../panel-edit/PanelEditor';
|
||||
import { DashboardInteractions } from '../utils/interactions';
|
||||
|
||||
import { DashboardScene } from './DashboardScene';
|
||||
import { VizPanelLinks, VizPanelLinksMenu } from './PanelLinks';
|
||||
|
|
@ -849,6 +852,75 @@ describe('panelMenuBehavior', () => {
|
|||
jest.restoreAllMocks();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Panel styles menu', () => {
|
||||
async function buildTimeseriesTestScene() {
|
||||
const menu = new VizPanelMenu({ $behaviors: [panelMenuBehavior] });
|
||||
const panel = new VizPanel({
|
||||
title: 'Timeseries Panel',
|
||||
pluginId: 'timeseries',
|
||||
key: 'panel-ts',
|
||||
menu,
|
||||
});
|
||||
|
||||
panel.getPlugin = () => getPanelPlugin({ skipDataQuery: false });
|
||||
|
||||
new DashboardScene({
|
||||
title: 'My dashboard',
|
||||
uid: 'dash-1',
|
||||
meta: { canEdit: true },
|
||||
body: DefaultGridLayoutManager.fromVizPanels([panel]),
|
||||
});
|
||||
|
||||
menu.activate();
|
||||
await new Promise((r) => setTimeout(r, 1));
|
||||
|
||||
return { menu, panel };
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
config.featureToggles.panelStyleActions = true;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
config.featureToggles.panelStyleActions = false;
|
||||
store.delete(LS_STYLES_COPY_KEY);
|
||||
});
|
||||
|
||||
it('should call analytics when copy styles is clicked', async () => {
|
||||
const spy = jest.spyOn(DashboardInteractions, 'panelStylesMenuClicked');
|
||||
const { menu } = await buildTimeseriesTestScene();
|
||||
|
||||
const copyItem = menu.state.items?.find((i) => i.text === 'Styles')?.subMenu?.[0];
|
||||
copyItem?.onClick?.({} as never);
|
||||
|
||||
expect(spy).toHaveBeenCalledWith('copy', 'timeseries', expect.any(Number));
|
||||
});
|
||||
|
||||
it('should call analytics when paste styles is clicked', async () => {
|
||||
store.set(LS_STYLES_COPY_KEY, JSON.stringify({ panelType: 'timeseries', styles: {} }));
|
||||
const spy = jest.spyOn(DashboardInteractions, 'panelStylesMenuClicked');
|
||||
const { menu } = await buildTimeseriesTestScene();
|
||||
|
||||
const pasteItem = menu.state.items?.find((i) => i.text === 'Styles')?.subMenu?.[1];
|
||||
pasteItem?.onClick?.({} as never);
|
||||
|
||||
expect(spy).toHaveBeenCalledWith('paste', 'timeseries', expect.any(Number));
|
||||
});
|
||||
|
||||
it('should not show styles menu when feature flag is disabled', async () => {
|
||||
config.featureToggles.panelStyleActions = false;
|
||||
const { menu } = await buildTimeseriesTestScene();
|
||||
|
||||
expect(menu.state.items?.find((i) => i.text === 'Styles')).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should not show styles menu for non-timeseries panels', async () => {
|
||||
const { menu } = await buildTestScene({});
|
||||
|
||||
expect(menu.state.items?.find((i) => i.text === 'Styles')).toBeUndefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
interface SceneOptions {
|
||||
|
|
|
|||
|
|
@ -347,6 +347,48 @@ export function panelMenuBehavior(menu: VizPanelMenu) {
|
|||
}
|
||||
}
|
||||
|
||||
if (panel.state.pluginId === 'timeseries' && config.featureToggles.panelStyleActions) {
|
||||
const stylesSubMenu: PanelMenuItem[] = [];
|
||||
|
||||
stylesSubMenu.push({
|
||||
text: t('panel.header-menu.copy-styles', `Copy styles`),
|
||||
iconClassName: 'copy',
|
||||
onClick: () => {
|
||||
DashboardInteractions.panelStylesMenuClicked(
|
||||
'copy',
|
||||
panel.state.pluginId,
|
||||
getPanelIdForVizPanel(panel) ?? -1
|
||||
);
|
||||
dashboard.copyPanelStyles(panel);
|
||||
},
|
||||
});
|
||||
|
||||
if (DashboardScene.hasPanelStylesToPaste('timeseries')) {
|
||||
stylesSubMenu.push({
|
||||
text: t('panel.header-menu.paste-styles', `Paste styles`),
|
||||
iconClassName: 'clipboard-alt',
|
||||
onClick: () => {
|
||||
DashboardInteractions.panelStylesMenuClicked(
|
||||
'paste',
|
||||
panel.state.pluginId,
|
||||
getPanelIdForVizPanel(panel) ?? -1
|
||||
);
|
||||
dashboard.pastePanelStyles(panel);
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
items.push({
|
||||
type: 'submenu',
|
||||
text: t('panel.header-menu.styles', `Styles`),
|
||||
iconClassName: 'palette',
|
||||
subMenu: stylesSubMenu,
|
||||
onClick: (e) => {
|
||||
e.preventDefault();
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
if (moreSubMenu.length) {
|
||||
items.push({
|
||||
type: 'submenu',
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ import {
|
|||
RowsLayoutRowKind,
|
||||
TabsLayoutTabKind,
|
||||
} from '@grafana/schema/dist/esm/schema/dashboard/v2';
|
||||
import { LS_PANEL_COPY_KEY, LS_ROW_COPY_KEY, LS_TAB_COPY_KEY } from 'app/core/constants';
|
||||
import { LS_PANEL_COPY_KEY, LS_ROW_COPY_KEY, LS_STYLES_COPY_KEY, LS_TAB_COPY_KEY } from 'app/core/constants';
|
||||
|
||||
import { deserializeAutoGridItem } from '../../serialization/layoutSerializers/AutoGridLayoutSerializer';
|
||||
import { deserializeGridItem } from '../../serialization/layoutSerializers/DefaultGridLayoutSerializer';
|
||||
|
|
@ -24,6 +24,7 @@ export function clearClipboard() {
|
|||
store.delete(LS_PANEL_COPY_KEY);
|
||||
store.delete(LS_ROW_COPY_KEY);
|
||||
store.delete(LS_TAB_COPY_KEY);
|
||||
store.delete(LS_STYLES_COPY_KEY);
|
||||
}
|
||||
|
||||
export interface RowStore {
|
||||
|
|
|
|||
|
|
@ -101,6 +101,11 @@ export const DashboardInteractions = {
|
|||
reportDashboardInteraction('panel_action_clicked', { item, id, source });
|
||||
},
|
||||
|
||||
// Panel styles copy/paste interactions
|
||||
panelStylesMenuClicked(action: 'copy' | 'paste', panelType: string, panelId: number, error?: boolean) {
|
||||
reportDashboardInteraction('panel_styles_menu_clicked', { action, panelType, panelId, error });
|
||||
},
|
||||
|
||||
// Dashboard edit item actions
|
||||
// dashboards_edit_action_clicked: when user adds or removes an item in edit mode
|
||||
// props: { item: string } - item is one of: add_panel, group_row, group_tab, ungroup, paste_panel, remove_row, remove_tab
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import { lastValueFrom, map } from 'rxjs';
|
||||
import { lastValueFrom } from 'rxjs';
|
||||
|
||||
import { config, getBackendSrv, FetchResponse } from '@grafana/runtime';
|
||||
import { config, getBackendSrv } from '@grafana/runtime';
|
||||
import { contextSrv } from 'app/core/services/context_srv';
|
||||
import { DashboardDTO, SnapshotSpec } from 'app/types/dashboard';
|
||||
import { DashboardDataDTO, DashboardDTO } from 'app/types/dashboard';
|
||||
|
||||
import { getAPINamespace } from '../../../api/utils';
|
||||
|
||||
|
|
@ -82,11 +82,12 @@ interface DashboardSnapshotList {
|
|||
items: K8sSnapshotResource[];
|
||||
}
|
||||
|
||||
interface K8sDashboardSnapshot {
|
||||
// Response from the /dashboard subresource - returns a Dashboard with raw dashboard data in spec
|
||||
interface K8sDashboardSubresource {
|
||||
apiVersion: string;
|
||||
kind: 'Snapshot';
|
||||
kind: 'Dashboard';
|
||||
metadata: K8sMetadata;
|
||||
spec: SnapshotSpec;
|
||||
spec: DashboardDataDTO;
|
||||
}
|
||||
|
||||
class K8sAPI implements DashboardSnapshotSrv {
|
||||
|
|
@ -128,32 +129,32 @@ class K8sAPI implements DashboardSnapshotSrv {
|
|||
const token = `??? TODO, get anon token for snapshots (${contextSrv.user?.name}) ???`;
|
||||
headers['Authorization'] = `Bearer ${token}`;
|
||||
}
|
||||
return lastValueFrom(
|
||||
getBackendSrv()
|
||||
.fetch<K8sDashboardSnapshot>({
|
||||
|
||||
// Fetch both snapshot metadata and dashboard content in parallel
|
||||
const [snapshotResponse, dashboardResponse] = await Promise.all([
|
||||
lastValueFrom(
|
||||
getBackendSrv().fetch<K8sSnapshotResource>({
|
||||
url: this.url + '/' + uid,
|
||||
method: 'GET',
|
||||
headers: headers,
|
||||
})
|
||||
.pipe(
|
||||
map((response: FetchResponse<K8sDashboardSnapshot>) => {
|
||||
return {
|
||||
dashboard: response.data.spec.dashboard,
|
||||
meta: {
|
||||
isSnapshot: true,
|
||||
canSave: false,
|
||||
canEdit: false,
|
||||
canAdmin: false,
|
||||
canStar: false,
|
||||
canShare: false,
|
||||
canDelete: false,
|
||||
isFolder: false,
|
||||
provisioned: false,
|
||||
},
|
||||
};
|
||||
})
|
||||
)
|
||||
);
|
||||
),
|
||||
lastValueFrom(
|
||||
getBackendSrv().fetch<K8sDashboardSubresource>({
|
||||
url: this.url + '/' + uid + '/dashboard',
|
||||
method: 'GET',
|
||||
headers: headers,
|
||||
})
|
||||
),
|
||||
]);
|
||||
|
||||
return {
|
||||
dashboard: dashboardResponse.data.spec,
|
||||
meta: {
|
||||
isSnapshot: true,
|
||||
k8s: snapshotResponse.data.metadata,
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -95,6 +95,9 @@ describe('ImportOverviewV2', () => {
|
|||
deleteDashboard: jest.fn(),
|
||||
listDeletedDashboards: jest.fn(),
|
||||
restoreDashboard: jest.fn(),
|
||||
listDashboardHistory: jest.fn(),
|
||||
getDashboardHistoryVersions: jest.fn(),
|
||||
restoreDashboardVersion: jest.fn(),
|
||||
});
|
||||
});
|
||||
|
||||
|
|
|
|||
16
public/app/features/panel/table/PaginationEditor.tsx
Normal file
16
public/app/features/panel/table/PaginationEditor.tsx
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
import * as React from 'react';
|
||||
|
||||
import { StandardEditorProps } from '@grafana/data';
|
||||
import { selectors } from '@grafana/e2e-selectors';
|
||||
import { Switch } from '@grafana/ui';
|
||||
|
||||
export const PaginationEditor = ({ onChange, value, id }: StandardEditorProps<boolean>) => (
|
||||
<Switch
|
||||
id={id}
|
||||
label={selectors.components.PanelEditor.OptionsPane.fieldLabel(`Enable pagination`)}
|
||||
value={Boolean(value)}
|
||||
onChange={(event: React.FormEvent<HTMLInputElement> | undefined) => {
|
||||
onChange(event?.currentTarget.checked);
|
||||
}}
|
||||
/>
|
||||
);
|
||||
86
public/app/features/panel/table/addTableCustomConfig.ts
Normal file
86
public/app/features/panel/table/addTableCustomConfig.ts
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
import { FieldConfigEditorBuilder } from '@grafana/data';
|
||||
import { t } from '@grafana/i18n';
|
||||
import { TableFieldOptions, defaultTableFieldOptions } from '@grafana/schema';
|
||||
|
||||
export function addTableCustomConfig<T extends TableFieldOptions>(
|
||||
builder: FieldConfigEditorBuilder<T>,
|
||||
options?: {
|
||||
hideFields?: boolean;
|
||||
filters?: boolean;
|
||||
wrapHeaderText?: boolean;
|
||||
}
|
||||
) {
|
||||
const category = [t('table.category-table', 'Table')];
|
||||
builder
|
||||
.addNumberInput({
|
||||
path: 'minWidth',
|
||||
name: t('table.name-min-column-width', 'Minimum column width'),
|
||||
category,
|
||||
description: t('table.description-min-column-width', 'The minimum width for column auto resizing'),
|
||||
settings: {
|
||||
placeholder: '150',
|
||||
min: 50,
|
||||
max: 500,
|
||||
},
|
||||
shouldApply: () => true,
|
||||
defaultValue: defaultTableFieldOptions.minWidth,
|
||||
})
|
||||
.addNumberInput({
|
||||
path: 'width',
|
||||
name: t('table.name-column-width', 'Column width'),
|
||||
category,
|
||||
settings: {
|
||||
placeholder: t('table.placeholder-column-width', 'auto'),
|
||||
min: 20,
|
||||
},
|
||||
shouldApply: () => true,
|
||||
defaultValue: defaultTableFieldOptions.width,
|
||||
})
|
||||
.addRadio({
|
||||
path: 'align',
|
||||
name: t('table.name-column-alignment', 'Column alignment'),
|
||||
category,
|
||||
settings: {
|
||||
options: [
|
||||
{ label: t('table.column-alignment-options.label-auto', 'Auto'), value: 'auto' },
|
||||
{ label: t('table.column-alignment-options.label-left', 'Left'), value: 'left' },
|
||||
{ label: t('table.column-alignment-options.label-center', 'Center'), value: 'center' },
|
||||
{ label: t('table.column-alignment-options.label-right', 'Right'), value: 'right' },
|
||||
],
|
||||
},
|
||||
defaultValue: defaultTableFieldOptions.align,
|
||||
})
|
||||
.addBooleanSwitch({
|
||||
path: 'wrapText',
|
||||
name: t('table.name-wrap-text', 'Wrap text'),
|
||||
category,
|
||||
});
|
||||
|
||||
if (options?.wrapHeaderText) {
|
||||
builder.addBooleanSwitch({
|
||||
path: 'wrapHeaderText',
|
||||
name: t('table.name-wrap-header-text', 'Wrap header text'),
|
||||
category,
|
||||
});
|
||||
}
|
||||
|
||||
if (options?.filters) {
|
||||
builder.addBooleanSwitch({
|
||||
path: 'filterable',
|
||||
name: t('table.name-column-filter', 'Column filter'),
|
||||
category,
|
||||
description: t('table.description-column-filter', 'Enables/disables field filters in table'),
|
||||
defaultValue: defaultTableFieldOptions.filterable,
|
||||
});
|
||||
}
|
||||
|
||||
if (options?.hideFields) {
|
||||
builder.addBooleanSwitch({
|
||||
path: 'hideFrom.viz',
|
||||
name: t('table.name-hide-in-table', 'Hide in table'),
|
||||
category,
|
||||
defaultValue: undefined,
|
||||
hideFromDefaults: true,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
import { PanelOptionsEditorBuilder } from '@grafana/data';
|
||||
import { t } from '@grafana/i18n';
|
||||
import { TableCellHeight, TableOptions, defaultTableOptions } from '@grafana/schema/dist/esm/common/common.gen';
|
||||
|
||||
import { PaginationEditor } from './PaginationEditor';
|
||||
|
||||
export const addTableCustomPanelOptions = <O extends TableOptions>(builder: PanelOptionsEditorBuilder<O>) => {
|
||||
const category = [t('table.category-table', 'Table')];
|
||||
builder
|
||||
.addBooleanSwitch({
|
||||
path: 'showHeader',
|
||||
name: t('table.name-show-table-header', 'Show table header'),
|
||||
category,
|
||||
defaultValue: defaultTableOptions.showHeader,
|
||||
})
|
||||
.addNumberInput({
|
||||
path: 'frozenColumns.left',
|
||||
name: t('table.name-frozen-columns', 'Frozen columns'),
|
||||
description: t('table.description-frozen-columns', 'Columns are frozen from the left side of the table'),
|
||||
settings: {
|
||||
placeholder: t('table.placeholder-frozen-columns', 'none'),
|
||||
},
|
||||
category,
|
||||
})
|
||||
.addRadio({
|
||||
path: 'cellHeight',
|
||||
name: t('table.name-cell-height', 'Cell height'),
|
||||
category,
|
||||
defaultValue: defaultTableOptions.cellHeight,
|
||||
settings: {
|
||||
options: [
|
||||
{ value: TableCellHeight.Sm, label: t('table.cell-height-options.label-small', 'Small') },
|
||||
{ value: TableCellHeight.Md, label: t('table.cell-height-options.label-medium', 'Medium') },
|
||||
{ value: TableCellHeight.Lg, label: t('table.cell-height-options.label-large', 'Large') },
|
||||
],
|
||||
},
|
||||
})
|
||||
.addNumberInput({
|
||||
path: 'maxRowHeight',
|
||||
name: t('table.name-max-height', 'Max row height'),
|
||||
category,
|
||||
settings: {
|
||||
placeholder: t('table.placeholder-max-height', 'none'),
|
||||
min: 0,
|
||||
},
|
||||
})
|
||||
.addCustomEditor({
|
||||
id: 'enablePagination',
|
||||
path: 'enablePagination',
|
||||
name: t('table.name-enable-pagination', 'Enable pagination'),
|
||||
category,
|
||||
editor: PaginationEditor,
|
||||
defaultValue: defaultTableOptions?.enablePagination,
|
||||
});
|
||||
};
|
||||
|
|
@ -1,9 +1,9 @@
|
|||
import { DashboardLoadedEvent } from '@grafana/data';
|
||||
import { CoreApp, DataQueryRequest, DataQueryResponse, DashboardLoadedEvent } from '@grafana/data';
|
||||
import { reportInteraction } from '@grafana/runtime';
|
||||
|
||||
import { ElasticsearchDataQuery } from './dataquery.gen';
|
||||
import pluginJson from './plugin.json';
|
||||
import { onDashboardLoadedHandler } from './tracking';
|
||||
import { onDashboardLoadedHandler, trackQuery } from './tracking';
|
||||
|
||||
jest.mock('@grafana/runtime', () => ({
|
||||
...jest.requireActual('@grafana/runtime'),
|
||||
|
|
@ -61,3 +61,90 @@ describe('onDashboardLoadedHandler', () => {
|
|||
expect(console.error).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('trackQuery', () => {
|
||||
beforeEach(() => {
|
||||
jest.mocked(reportInteraction).mockClear();
|
||||
});
|
||||
|
||||
test('tracks editor_type for code editor queries', () => {
|
||||
const query: ElasticsearchDataQuery = {
|
||||
refId: 'A',
|
||||
editorType: 'code',
|
||||
metrics: [{ id: '1', type: 'count' }],
|
||||
bucketAggs: [],
|
||||
};
|
||||
|
||||
const request: DataQueryRequest<ElasticsearchDataQuery> & { targets: ElasticsearchDataQuery[] } = {
|
||||
app: CoreApp.Explore,
|
||||
targets: [query],
|
||||
} as DataQueryRequest<ElasticsearchDataQuery>;
|
||||
|
||||
const response: DataQueryResponse = {
|
||||
data: [{ length: 1 }],
|
||||
};
|
||||
|
||||
trackQuery(response, request, new Date());
|
||||
|
||||
expect(reportInteraction).toHaveBeenCalledWith(
|
||||
'grafana_elasticsearch_query_executed',
|
||||
expect.objectContaining({
|
||||
editor_type: 'code',
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('tracks editor_type as builder for builder queries', () => {
|
||||
const query: ElasticsearchDataQuery = {
|
||||
refId: 'A',
|
||||
editorType: 'builder',
|
||||
metrics: [{ id: '1', type: 'count' }],
|
||||
bucketAggs: [],
|
||||
};
|
||||
|
||||
const request: DataQueryRequest<ElasticsearchDataQuery> & { targets: ElasticsearchDataQuery[] } = {
|
||||
app: CoreApp.Explore,
|
||||
targets: [query],
|
||||
} as DataQueryRequest<ElasticsearchDataQuery>;
|
||||
|
||||
const response: DataQueryResponse = {
|
||||
data: [{ length: 1 }],
|
||||
};
|
||||
|
||||
trackQuery(response, request, new Date());
|
||||
|
||||
expect(reportInteraction).toHaveBeenCalledWith(
|
||||
'grafana_elasticsearch_query_executed',
|
||||
expect.objectContaining({
|
||||
editor_type: 'builder',
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('defaults to builder when editor_type is not specified', () => {
|
||||
const query: ElasticsearchDataQuery = {
|
||||
refId: 'A',
|
||||
query: 'test query',
|
||||
metrics: [{ id: '1', type: 'count' }],
|
||||
bucketAggs: [],
|
||||
};
|
||||
|
||||
const request: DataQueryRequest<ElasticsearchDataQuery> & { targets: ElasticsearchDataQuery[] } = {
|
||||
app: CoreApp.Explore,
|
||||
targets: [query],
|
||||
} as DataQueryRequest<ElasticsearchDataQuery>;
|
||||
|
||||
const response: DataQueryResponse = {
|
||||
data: [{ length: 1 }],
|
||||
};
|
||||
|
||||
trackQuery(response, request, new Date());
|
||||
|
||||
expect(reportInteraction).toHaveBeenCalledWith(
|
||||
'grafana_elasticsearch_query_executed',
|
||||
expect.objectContaining({
|
||||
editor_type: 'builder',
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -138,6 +138,7 @@ export function trackQuery(
|
|||
time_range_from: request?.range?.from?.toISOString(),
|
||||
time_range_to: request?.range?.to?.toISOString(),
|
||||
time_taken: Date.now() - startTime.getTime(),
|
||||
editor_type: query.editorType || 'builder',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,20 +0,0 @@
|
|||
import * as React from 'react';
|
||||
|
||||
import { StandardEditorProps } from '@grafana/data';
|
||||
import { selectors } from '@grafana/e2e-selectors';
|
||||
import { Switch } from '@grafana/ui';
|
||||
|
||||
export function PaginationEditor({ onChange, value, id }: StandardEditorProps<boolean>) {
|
||||
const changeValue = (event: React.FormEvent<HTMLInputElement> | undefined) => {
|
||||
onChange(event?.currentTarget.checked);
|
||||
};
|
||||
|
||||
return (
|
||||
<Switch
|
||||
id={id}
|
||||
label={selectors.components.PanelEditor.OptionsPane.fieldLabel(`Enable pagination`)}
|
||||
value={Boolean(value)}
|
||||
onChange={changeValue}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
|
@ -1,18 +1,18 @@
|
|||
import { PanelPlugin, standardEditorsRegistry, identityOverrideProcessor, FieldConfigProperty } from '@grafana/data';
|
||||
import { identityOverrideProcessor, FieldConfigProperty, PanelPlugin, standardEditorsRegistry } from '@grafana/data';
|
||||
import { t } from '@grafana/i18n';
|
||||
import {
|
||||
defaultTableFieldOptions,
|
||||
TableCellOptions,
|
||||
TableCellDisplayMode,
|
||||
TableCellHeight,
|
||||
TableCellOptions,
|
||||
TableCellTooltipPlacement,
|
||||
defaultTableFieldOptions,
|
||||
} from '@grafana/schema';
|
||||
import { addTableCustomConfig } from 'app/features/panel/table/addTableCustomConfig';
|
||||
import { addTableCustomPanelOptions } from 'app/features/panel/table/addTableCustomPanelOptions';
|
||||
|
||||
import { PaginationEditor } from './PaginationEditor';
|
||||
import { TableCellOptionEditor } from './TableCellOptionEditor';
|
||||
import { TablePanel } from './TablePanel';
|
||||
import { tableMigrationHandler, tablePanelChangedHandler } from './migrations';
|
||||
import { Options, defaultOptions, FieldConfig } from './panelcfg.gen';
|
||||
import { FieldConfig, Options } from './panelcfg.gen';
|
||||
import { tableSuggestionsSupplier } from './suggestions';
|
||||
|
||||
export const plugin = new PanelPlugin<Options, FieldConfig>(TablePanel)
|
||||
|
|
@ -25,86 +25,31 @@ export const plugin = new PanelPlugin<Options, FieldConfig>(TablePanel)
|
|||
},
|
||||
},
|
||||
useCustomConfig: (builder) => {
|
||||
const category = [t('table.category-table', 'Table')];
|
||||
addTableCustomConfig(builder, {
|
||||
filters: true,
|
||||
wrapHeaderText: true,
|
||||
hideFields: true,
|
||||
});
|
||||
|
||||
const cellCategory = [t('table.category-cell-options', 'Cell options')];
|
||||
|
||||
builder.addCustomEditor({
|
||||
id: 'footer.reducers',
|
||||
category: [t('table.category-table-footer', 'Table footer')],
|
||||
path: 'footer.reducers',
|
||||
name: t('table.name-calculation', 'Calculation'),
|
||||
description: t('table.description-calculation', 'Choose a reducer function / calculation'),
|
||||
editor: standardEditorsRegistry.get('stats-picker').editor,
|
||||
override: standardEditorsRegistry.get('stats-picker').editor,
|
||||
defaultValue: [],
|
||||
process: identityOverrideProcessor,
|
||||
shouldApply: () => true,
|
||||
settings: {
|
||||
allowMultiple: true,
|
||||
},
|
||||
});
|
||||
|
||||
builder
|
||||
.addNumberInput({
|
||||
path: 'minWidth',
|
||||
name: t('table.name-min-column-width', 'Minimum column width'),
|
||||
category,
|
||||
description: t('table.description-min-column-width', 'The minimum width for column auto resizing'),
|
||||
settings: {
|
||||
placeholder: '150',
|
||||
min: 50,
|
||||
max: 500,
|
||||
},
|
||||
shouldApply: () => true,
|
||||
defaultValue: defaultTableFieldOptions.minWidth,
|
||||
})
|
||||
.addNumberInput({
|
||||
path: 'width',
|
||||
name: t('table.name-column-width', 'Column width'),
|
||||
category,
|
||||
settings: {
|
||||
placeholder: t('table.placeholder-column-width', 'auto'),
|
||||
min: 20,
|
||||
},
|
||||
shouldApply: () => true,
|
||||
defaultValue: defaultTableFieldOptions.width,
|
||||
})
|
||||
.addRadio({
|
||||
path: 'align',
|
||||
name: t('table.name-column-alignment', 'Column alignment'),
|
||||
category,
|
||||
settings: {
|
||||
options: [
|
||||
{ label: t('table.column-alignment-options.label-auto', 'Auto'), value: 'auto' },
|
||||
{ label: t('table.column-alignment-options.label-left', 'Left'), value: 'left' },
|
||||
{ label: t('table.column-alignment-options.label-center', 'Center'), value: 'center' },
|
||||
{ label: t('table.column-alignment-options.label-right', 'Right'), value: 'right' },
|
||||
],
|
||||
},
|
||||
defaultValue: defaultTableFieldOptions.align,
|
||||
})
|
||||
.addBooleanSwitch({
|
||||
path: 'filterable',
|
||||
name: t('table.name-column-filter', 'Column filter'),
|
||||
category,
|
||||
description: t('table.description-column-filter', 'Enables/disables field filters in table'),
|
||||
defaultValue: defaultTableFieldOptions.filterable,
|
||||
})
|
||||
.addBooleanSwitch({
|
||||
path: 'wrapText',
|
||||
name: t('table.name-wrap-text', 'Wrap text'),
|
||||
category,
|
||||
})
|
||||
.addBooleanSwitch({
|
||||
path: 'wrapHeaderText',
|
||||
name: t('table.name-wrap-header-text', 'Wrap header text'),
|
||||
category,
|
||||
})
|
||||
.addBooleanSwitch({
|
||||
path: 'hideFrom.viz',
|
||||
name: t('table.name-hide-in-table', 'Hide in table'),
|
||||
category,
|
||||
defaultValue: undefined,
|
||||
hideFromDefaults: true,
|
||||
})
|
||||
.addCustomEditor({
|
||||
id: 'footer.reducers',
|
||||
category: [t('table.category-table-footer', 'Table footer')],
|
||||
path: 'footer.reducers',
|
||||
name: t('table.name-calculation', 'Calculation'),
|
||||
description: t('table.description-calculation', 'Choose a reducer function / calculation'),
|
||||
editor: standardEditorsRegistry.get('stats-picker').editor,
|
||||
override: standardEditorsRegistry.get('stats-picker').editor,
|
||||
defaultValue: [],
|
||||
process: identityOverrideProcessor,
|
||||
shouldApply: () => true,
|
||||
settings: {
|
||||
allowMultiple: true,
|
||||
},
|
||||
})
|
||||
.addCustomEditor<void, TableCellOptions>({
|
||||
id: 'cellOptions',
|
||||
path: 'cellOptions',
|
||||
|
|
@ -179,52 +124,6 @@ export const plugin = new PanelPlugin<Options, FieldConfig>(TablePanel)
|
|||
},
|
||||
})
|
||||
.setPanelOptions((builder) => {
|
||||
const category = [t('table.category-table', 'Table')];
|
||||
builder
|
||||
.addBooleanSwitch({
|
||||
path: 'showHeader',
|
||||
name: t('table.name-show-table-header', 'Show table header'),
|
||||
category,
|
||||
defaultValue: defaultOptions.showHeader,
|
||||
})
|
||||
.addNumberInput({
|
||||
path: 'frozenColumns.left',
|
||||
name: t('table.name-frozen-columns', 'Frozen columns'),
|
||||
description: t('table.description-frozen-columns', 'Columns are frozen from the left side of the table'),
|
||||
settings: {
|
||||
placeholder: 'none',
|
||||
},
|
||||
category,
|
||||
})
|
||||
.addRadio({
|
||||
path: 'cellHeight',
|
||||
name: t('table.name-cell-height', 'Cell height'),
|
||||
category,
|
||||
defaultValue: defaultOptions.cellHeight,
|
||||
settings: {
|
||||
options: [
|
||||
{ value: TableCellHeight.Sm, label: t('table.cell-height-options.label-small', 'Small') },
|
||||
{ value: TableCellHeight.Md, label: t('table.cell-height-options.label-medium', 'Medium') },
|
||||
{ value: TableCellHeight.Lg, label: t('table.cell-height-options.label-large', 'Large') },
|
||||
],
|
||||
},
|
||||
})
|
||||
.addNumberInput({
|
||||
path: 'maxRowHeight',
|
||||
name: t('table.name-max-height', 'Max row height'),
|
||||
category,
|
||||
settings: {
|
||||
placeholder: t('table.placeholder-max-height', 'none'),
|
||||
min: 0,
|
||||
},
|
||||
})
|
||||
.addCustomEditor({
|
||||
id: 'enablePagination',
|
||||
path: 'enablePagination',
|
||||
name: t('table.name-enable-pagination', 'Enable pagination'),
|
||||
category,
|
||||
editor: PaginationEditor,
|
||||
defaultValue: defaultOptions?.enablePagination,
|
||||
});
|
||||
addTableCustomPanelOptions(builder);
|
||||
})
|
||||
.setSuggestionsSupplier(tableSuggestionsSupplier);
|
||||
|
|
|
|||
|
|
@ -24,31 +24,8 @@ composableKinds: PanelCfg: {
|
|||
schemas: [{
|
||||
version: [0, 0]
|
||||
schema: {
|
||||
Options: {
|
||||
// Represents the index of the selected frame
|
||||
frameIndex: number | *0
|
||||
// Controls whether the panel should show the header
|
||||
showHeader: bool | *true
|
||||
// Controls whether the header should show icons for the column types
|
||||
showTypeIcons?: bool | *false
|
||||
// Used to control row sorting
|
||||
sortBy?: [...ui.TableSortByFieldState]
|
||||
// Enable pagination on the table
|
||||
enablePagination?: bool
|
||||
// Controls the height of the rows
|
||||
cellHeight?: ui.TableCellHeight & (*"sm" | _)
|
||||
// limits the maximum height of a row, if text wrapping or dynamic height is enabled
|
||||
maxRowHeight?: number
|
||||
// Defines the number of columns to freeze on the left side of the table
|
||||
frozenColumns?: {
|
||||
left?: number | *0
|
||||
}
|
||||
// If true, disables all keyboard events in the table. this is used when previewing a table (i.e. suggestions)
|
||||
disableKeyboardEvents?: bool
|
||||
} @cuetsy(kind="interface")
|
||||
FieldConfig: {
|
||||
ui.TableFieldOptions
|
||||
} @cuetsy(kind="interface")
|
||||
Options: { ui.TableOptions } @cuetsy(kind="interface")
|
||||
FieldConfig: { ui.TableFieldOptions } @cuetsy(kind="interface")
|
||||
}
|
||||
}]
|
||||
lenses: []
|
||||
|
|
|
|||
49
public/app/plugins/panel/table/panelcfg.gen.ts
generated
49
public/app/plugins/panel/table/panelcfg.gen.ts
generated
|
|
@ -12,53 +12,6 @@
|
|||
|
||||
import * as ui from '@grafana/schema';
|
||||
|
||||
export interface Options {
|
||||
/**
|
||||
* Controls the height of the rows
|
||||
*/
|
||||
cellHeight?: ui.TableCellHeight;
|
||||
/**
|
||||
* If true, disables all keyboard events in the table. this is used when previewing a table (i.e. suggestions)
|
||||
*/
|
||||
disableKeyboardEvents?: boolean;
|
||||
/**
|
||||
* Enable pagination on the table
|
||||
*/
|
||||
enablePagination?: boolean;
|
||||
/**
|
||||
* Represents the index of the selected frame
|
||||
*/
|
||||
frameIndex: number;
|
||||
/**
|
||||
* Defines the number of columns to freeze on the left side of the table
|
||||
*/
|
||||
frozenColumns?: {
|
||||
left?: number;
|
||||
};
|
||||
/**
|
||||
* limits the maximum height of a row, if text wrapping or dynamic height is enabled
|
||||
*/
|
||||
maxRowHeight?: number;
|
||||
/**
|
||||
* Controls whether the panel should show the header
|
||||
*/
|
||||
showHeader: boolean;
|
||||
/**
|
||||
* Controls whether the header should show icons for the column types
|
||||
*/
|
||||
showTypeIcons?: boolean;
|
||||
/**
|
||||
* Used to control row sorting
|
||||
*/
|
||||
sortBy?: Array<ui.TableSortByFieldState>;
|
||||
}
|
||||
|
||||
export const defaultOptions: Partial<Options> = {
|
||||
cellHeight: ui.TableCellHeight.Sm,
|
||||
frameIndex: 0,
|
||||
showHeader: true,
|
||||
showTypeIcons: false,
|
||||
sortBy: [],
|
||||
};
|
||||
export interface Options extends ui.TableOptions {}
|
||||
|
||||
export interface FieldConfig extends ui.TableFieldOptions {}
|
||||
|
|
|
|||
|
|
@ -44,6 +44,54 @@ export const defaultGraphConfig: GraphFieldConfig = {
|
|||
showValues: false,
|
||||
};
|
||||
|
||||
/**
|
||||
* Defines graph style configuration properties. Properties from GraphFieldConfig.
|
||||
* Temporary config - PoC.
|
||||
*/
|
||||
export const defaultGraphStyleConfig = {
|
||||
fieldConfig: {
|
||||
defaultsProps: ['color'],
|
||||
defaults: [
|
||||
// Line config
|
||||
'lineColor',
|
||||
'lineInterpolation',
|
||||
'lineStyle',
|
||||
'lineWidth',
|
||||
'spanNulls',
|
||||
// Fill config
|
||||
'fillBelowTo',
|
||||
'fillColor',
|
||||
'fillOpacity',
|
||||
// Points config
|
||||
'pointColor',
|
||||
'pointSize',
|
||||
'pointSymbol',
|
||||
'showPoints',
|
||||
// Axis config
|
||||
'axisBorderShow',
|
||||
'axisCenteredZero',
|
||||
'axisColorMode',
|
||||
'axisGridShow',
|
||||
'axisLabel',
|
||||
'axisPlacement',
|
||||
'axisSoftMax',
|
||||
'axisSoftMin',
|
||||
'axisWidth',
|
||||
// Graph field config
|
||||
'drawStyle',
|
||||
'gradientMode',
|
||||
'insertNulls',
|
||||
'showValues',
|
||||
// Stacking
|
||||
'stacking',
|
||||
// Bar config
|
||||
'barAlignment',
|
||||
'barWidthFactor',
|
||||
'barMaxWidth',
|
||||
],
|
||||
},
|
||||
} as const;
|
||||
|
||||
export type NullEditorSettings = { isTime: boolean };
|
||||
|
||||
export function getGraphFieldConfig(cfg: GraphFieldConfig, isTime = true): SetFieldConfigOptionsArgs<GraphFieldConfig> {
|
||||
|
|
|
|||
|
|
@ -11290,6 +11290,7 @@
|
|||
},
|
||||
"header-menu": {
|
||||
"copy": "Copy",
|
||||
"copy-styles": "Copy styles",
|
||||
"create-library-panel": "Create library panel",
|
||||
"duplicate": "Duplicate",
|
||||
"edit": "Edit",
|
||||
|
|
@ -11301,11 +11302,13 @@
|
|||
"inspect-json": "Panel JSON",
|
||||
"more": "More...",
|
||||
"new-alert-rule": "New alert rule",
|
||||
"paste-styles": "Paste styles",
|
||||
"query": "Query",
|
||||
"remove": "Remove",
|
||||
"replace-library-panel": "Replace library panel",
|
||||
"share": "Share",
|
||||
"show-legend": "Show legend",
|
||||
"styles": "Styles",
|
||||
"time-settings": "Time settings",
|
||||
"unlink-library-panel": "Unlink library panel",
|
||||
"view": "View"
|
||||
|
|
@ -13627,6 +13630,7 @@
|
|||
"name-wrap-header-text": "Wrap header text",
|
||||
"name-wrap-text": "Wrap text",
|
||||
"placeholder-column-width": "auto",
|
||||
"placeholder-frozen-columns": "none",
|
||||
"placeholder-max-height": "none",
|
||||
"tooltip-placement-options": {
|
||||
"label-auto": "Auto",
|
||||
|
|
|
|||
Loading…
Reference in a new issue