Compare commits

..

11 Commits

Author SHA1 Message Date
Larissa Wandzura
1d44841640 created the new troubleshooting guide 2025-12-12 15:36:17 -06:00
Larissa Wandzura
3459c67bfb DOCS: Overhaul Azure Monitor data source docs (#115121)
* continued edits

* authentication updates

* added more info to configure doc

* started work on query editor

* reviewed the configure doc, consolidated sections

* fixed issue with headings

* fixed errors

* updates to the template variables doc

* created initial troubleshooting doc

* removed gerunds and fixed heading issues

* new annotations doc added

* more updates to query editor

* fixed spelling

* fixed some linter issues

* fixed flow for the intro doc

* updates to the intro doc

* fixed transformation links

* added review date to front matter

* ran prettier

* added a new alerting doc

* linter updates

* some final edits

* ran prettier again

* Update docs/sources/datasources/azure-monitor/configure/index.md

Co-authored-by: Andreas Christou <andreas.christou@grafana.com>

* Update docs/sources/datasources/azure-monitor/configure/index.md

Co-authored-by: Andreas Christou <andreas.christou@grafana.com>

* Update docs/sources/datasources/azure-monitor/troubleshooting/index.md

Co-authored-by: Andreas Christou <andreas.christou@grafana.com>

* edits based on feedback

* removed all relative reference links

* ran prettier

---------

Co-authored-by: Andreas Christou <andreas.christou@grafana.com>
2025-12-12 21:10:03 +00:00
Larissa Wandzura
37ccd8bc3d Docs: Added troubleshooting guide for the InfluxDB data source (#115191)
* Docs: Add troubleshooting guide for InfluxDB data source

* linter fixes, updates based on feedback
2025-12-12 20:21:50 +00:00
colin-stuart
5156177079 SCIM: show error if SCIM-provisioned user attempts login with non-SAML auth module (#115271) 2025-12-12 13:51:37 -06:00
Paul Marbach
4817ecf6a3 Sparkline: Guess decimals rather than going with 0 (#115246)
* Sparkline: Guess decimals rather than going with 0

* Update packages/grafana-ui/src/components/Sparkline/utils.test.ts
2025-12-12 13:59:54 -05:00
Renato Costa
c73cab8eef chore: add cleanup task for duplicated provisioned dashboards (#115103)
* chore: add cleanup task for duplicated provisioned dashboards
2025-12-12 13:56:47 -05:00
Adela Almasan
a37ebf609e VizSuggestions: Fix unique key warning (#115112) 2025-12-12 12:25:03 -06:00
Kristina Demeshchik
b29e8ccb45 Dashboards: Generate default tab title when converting rows with empty titles to tabs (#115256)
Generate default title for empty row titles
2025-12-12 13:14:56 -05:00
Matias Chomicki
644f7b7001 Infinite scroll: Fix interaction with client-side filter (#115243)
Infinite scroll: fix interaction with client-side filter
2025-12-12 18:59:49 +01:00
Alexander Zobnin
629570926d Zanzana: Fix resource translation for dashboards (#115077) 2025-12-12 11:05:10 -06:00
Will Assis
1b59c82b74 Revert "Unified-storage: sql backend key path backfill (#115033)" (#115257)
This reverts commit b2dd095bd8.
2025-12-12 17:00:08 +00:00
34 changed files with 3358 additions and 524 deletions

27
.github/CODEOWNERS vendored
View File

@@ -77,11 +77,11 @@
/.air.toml @macabu
# Git Sync / App Platform Provisioning
/apps/provisioning/ @grafana/grafana-app-platform-squad
/pkg/operators @grafana/grafana-app-platform-squad
/public/app/features/provisioning @grafana/grafana-search-navigate-organise
/pkg/registry/apis/provisioning @grafana/grafana-app-platform-squad
/pkg/tests/apis/provisioning @grafana/grafana-app-platform-squad
/apps/provisioning/ @grafana/grafana-git-ui-sync-team
/pkg/operators @grafana/grafana-git-ui-sync-team
/public/app/features/provisioning @grafana/grafana-git-ui-sync-team
/pkg/registry/apis/provisioning @grafana/grafana-git-ui-sync-team
/pkg/tests/apis/provisioning @grafana/grafana-git-ui-sync-team
# Git Sync frontend owned by frontend team as a whole.
/apps/alerting/ @grafana/alerting-backend
@@ -520,7 +520,7 @@ i18next.config.ts @grafana/grafana-frontend-platform
/e2e-playwright/various-suite/solo-route.spec.ts @grafana/dashboards-squad
/e2e-playwright/various-suite/trace-view-scrolling.spec.ts @grafana/observability-traces-and-profiling
/e2e-playwright/various-suite/verify-i18n.spec.ts @grafana/grafana-frontend-platform
/e2e-playwright/various-suite/visualization-suggestions.spec.ts @grafana/dashboards-squad
/e2e-playwright/various-suite/visualization-suggestions.spec.ts @grafana/dataviz-squad
/e2e-playwright/various-suite/perf-test.spec.ts @grafana/grafana-frontend-platform
# Packages
@@ -753,7 +753,7 @@ i18next.config.ts @grafana/grafana-frontend-platform
/packages/grafana-api-clients/src/clients/rtkq/iam/ @grafana/access-squad @grafana/identity-squad
/packages/grafana-api-clients/src/clients/rtkq/logsdrilldown/ @grafana/observability-logs
/packages/grafana-api-clients/src/clients/rtkq/preferences/ @grafana/plugins-platform-frontend
/packages/grafana-api-clients/src/clients/rtkq/provisioning/ @grafana/grafana-search-navigate-organise
/packages/grafana-api-clients/src/clients/rtkq/provisioning/ @grafana/grafana-git-ui-sync-team
/packages/grafana-api-clients/src/clients/rtkq/shorturl/ @grafana/sharing-squad
# root files, mostly frontend
@@ -956,6 +956,7 @@ playwright.storybook.config.ts @grafana/grafana-frontend-platform
/public/app/features/notifications/ @grafana/grafana-search-navigate-organise
/public/app/features/org/ @grafana/grafana-search-navigate-organise
/public/app/features/panel/ @grafana/dashboards-squad
/public/app/features/panel/components/VizTypePicker/VisualizationSuggestions.tsx @grafana/dataviz-squad
/public/app/features/panel/suggestions/ @grafana/dataviz-squad
/public/app/features/playlist/ @grafana/dashboards-squad
/public/app/features/plugins/ @grafana/plugins-platform-frontend
@@ -1084,7 +1085,7 @@ playwright.storybook.config.ts @grafana/grafana-frontend-platform
eslint-suppressions.json @grafanabot
# Design system
/public/img/icons/unicons/ @grafana/product-design-engineering
/public/img/icons/unicons/ @grafana/design-system
# Core datasources
/public/app/plugins/datasource/dashboard/ @grafana/dashboards-squad
@@ -1260,11 +1261,11 @@ embed.go @grafana/grafana-as-code
/.github/workflows/stale.yml @grafana/grafana-developer-enablement-squad
/.github/workflows/storybook-a11y.yml @grafana/grafana-frontend-platform
/.github/workflows/update-make-docs.yml @grafana/docs-tooling
/.github/workflows/scripts/kinds/verify-kinds.go @grafana/grafana-app-platform-squad
/.github/workflows/scripts/kinds/verify-kinds.go @grafana/platform-monitoring
/.github/workflows/scripts/create-security-branch/create-security-branch.sh @grafana/grafana-developer-enablement-squad
/.github/workflows/publish-kinds-next.yml @grafana/grafana-app-platform-squad
/.github/workflows/publish-kinds-release.yml @grafana/grafana-app-platform-squad
/.github/workflows/verify-kinds.yml @grafana/grafana-app-platform-squad
/.github/workflows/publish-kinds-next.yml @grafana/platform-monitoring
/.github/workflows/publish-kinds-release.yml @grafana/platform-monitoring
/.github/workflows/verify-kinds.yml @grafana/platform-monitoring
/.github/workflows/dashboards-issue-add-label.yml @grafana/dashboards-squad
/.github/workflows/run-schema-v2-e2e.yml @grafana/dashboards-squad
/.github/workflows/run-dashboard-search-e2e.yml @grafana/grafana-search-and-storage
@@ -1325,7 +1326,7 @@ embed.go @grafana/grafana-as-code
/conf/provisioning/dashboards/ @grafana/dashboards-squad
/conf/provisioning/datasources/ @grafana/plugins-platform-backend
/conf/provisioning/plugins/ @grafana/plugins-platform-backend
/conf/provisioning/sample/ @grafana/grafana-app-platform-squad
/conf/provisioning/sample/ @grafana/grafana-git-ui-sync-team
# Security
/relyance.yaml @grafana/security-team

View File

@@ -3,7 +3,6 @@ aliases:
- ../data-sources/azure-monitor/
- ../features/datasources/azuremonitor/
- azuremonitor/
- azuremonitor/deprecated-application-insights/
description: Guide for using Azure Monitor in Grafana
keywords:
- grafana
@@ -23,6 +22,7 @@ labels:
menuTitle: Azure Monitor
title: Azure Monitor data source
weight: 300
last_reviewed: 2025-12-04
refs:
configure-grafana-feature-toggles:
- pattern: /docs/grafana/
@@ -49,6 +49,11 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
transform-data:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
configure-grafana-azure:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
@@ -63,295 +68,98 @@ refs:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
configure-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
query-editor-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
template-variables-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
alerting-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
troubleshooting-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
annotations-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
---
# Azure Monitor data source
Grafana ships with built-in support for Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
This topic explains configuring and querying specific to the Azure Monitor data source.
The Azure Monitor data source plugin allows you to query and visualize data from Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:data-source-management).
Only users with the organization administrator role can add data sources.
## Supported Azure clouds
Once you've added the Azure Monitor data source, you can [configure it](#configure-the-data-source) so that your Grafana instance's users can create queries in its [query editor](query-editor/) when they [build dashboards](ref:build-dashboards) and use [Explore](ref:explore).
The Azure Monitor data source supports the following Azure cloud environments:
The Azure Monitor data source supports visualizing data from four Azure services:
- **Azure** - Azure public cloud (default)
- **Azure US Government** - Azure Government cloud
- **Azure China** - Azure China cloud operated by 21Vianet
- **Azure Monitor Metrics:** Collect numeric data from resources in your Azure account.
- **Azure Monitor Logs:** Collect log and performance data from your Azure account, and query using the Kusto Query Language (KQL).
- **Azure Resource Graph:** Query your Azure resources across subscriptions.
- **Azure Monitor Application Insights:** Collect trace logging data and other application performance metrics.
## Supported Azure services
## Configure the data source
The Azure Monitor data source supports the following Azure services:
**To access the data source configuration page:**
| Service | Description |
| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------- |
| **Azure Monitor Metrics** | Collect numeric data from resources in your Azure account. Supports dimensions, aggregations, and time grain configuration. |
| **Azure Monitor Logs** | Collect log and performance data from your Azure account using the Kusto Query Language (KQL). |
| **Azure Resource Graph** | Query your Azure resources across subscriptions using KQL. Useful for inventory, compliance, and resource management. |
| **Application Insights Traces** | Collect distributed trace data and correlate requests across your application components. |
1. Click **Connections** in the left-side menu.
1. Under Your connections, click **Data sources**.
1. Enter `Azure Monitor` in the search bar.
1. Click **Azure Monitor**.
## Get started
The **Settings** tab of the data source is displayed.
The following documents will help you get started with the Azure Monitor data source:
### Configure Azure Active Directory (AD) authentication
- [Configure the Azure Monitor data source](ref:configure-azure-monitor) - Set up authentication and connect to Azure
- [Azure Monitor query editor](ref:query-editor-azure-monitor) - Create and edit queries for Metrics, Logs, Traces, and Resource Graph
- [Template variables](ref:template-variables-azure-monitor) - Create dynamic dashboards with Azure Monitor variables
- [Alerting](ref:alerting-azure-monitor) - Create alert rules using Azure Monitor data
- [Troubleshooting](ref:troubleshooting-azure-monitor) - Solve common configuration and query errors
You must create an app registration and service principal in Azure AD to authenticate the data source.
For configuration details, refer to the [Azure documentation for service principals](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
## Additional features
The app registration you create must have the `Reader` role assigned on the subscription.
For more information, refer to [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
After you have configured the Azure Monitor data source, you can:
If you host Grafana in Azure, such as in App Service or Azure Virtual Machines, you can configure the Azure Monitor data source to use Managed Identity for secure authentication without entering credentials into Grafana.
For details, refer to [Configuring using Managed Identity](#configuring-using-managed-identity).
- Add [Annotations](ref:annotations-azure-monitor) to overlay Azure log events on your graphs.
- Configure and use [Template variables](ref:template-variables-azure-monitor) for dynamic dashboards.
- Add [Transformations](ref:transform-data) to manipulate query results.
- Set up [Alerting](ref:alerting-azure-monitor) and recording rules using Metrics, Logs, Traces, and Resource Graph queries.
- Use [Explore](ref:explore) to investigate your Azure data without building a dashboard.
You can configure the Azure Monitor data source to use Workload Identity for secure authentication without entering credentials into Grafana if you host Grafana in a Kubernetes environment, such as AKS, and require access to Azure resources.
For details, refer to [Configuring using Workload Identity](#configuring-using-workload-identity).
## Pre-built dashboards
| Name | Description |
| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **Authentication** | Enables Managed Identity. Selecting Managed Identity hides many of the other fields. For details, see [Configuring using Managed Identity](#configuring-using-managed-identity). |
| **Azure Cloud** | Sets the national cloud for your Azure account. For most users, this is the default "Azure". For details, see the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/develop/authentication-national-cloud). |
| **Directory (tenant) ID** | Sets the directory/tenant ID for the Azure AD app registration to use for authentication. For details, see the [Azure tenant and app ID docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in). |
| **Application (client) ID** | Sets the application/client ID for the Azure AD app registration to use for authentication. |
| **Client secret** | Sets the application client secret for the Azure AD app registration to use for authentication. For details, see the [Azure application secret docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret). |
| **Default subscription** | _(Optional)_ Sets a default subscription for template variables to use. |
| **Enable Basic Logs** | Allows this data source to execute queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces. These queries may incur additional costs. |
The Azure Monitor plugin includes the following pre-built dashboards:
### Provision the data source
- **Azure Monitor Overview** - Displays key metrics across your Azure subscriptions and resources.
- **Azure Storage Account** - Shows storage account metrics including availability, latency, and transactions.
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
To import a pre-built dashboard:
#### Provisioning examples
1. Go to **Connections** > **Data sources**.
1. Select your Azure Monitor data source.
1. Click the **Dashboards** tab.
1. Click **Import** next to the dashboard you want to use.
**Azure AD App Registration (client secret):**
## Related resources
```yaml
apiVersion: 1 # config file version
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: clientsecret
cloudName: azuremonitor # See table below
tenantId: <tenant-id>
clientId: <client-id>
subscriptionId: <subscription-id> # Optional, default subscription
secureJsonData:
clientSecret: <client-secret>
version: 1
```
**Managed Identity:**
```yaml
apiVersion: 1 # config file version
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: msi
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
**Workload Identity:**
```yaml
apiVersion: 1 # config file version
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: workloadidentity
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
**Current User:**
{{< admonition type="note" >}}
The `oauthPassThru` property is required for current user authentication to function.
Additionally, `disableGrafanaCache` is necessary to prevent the data source from returning cached responses for resources users don't have access to.
{{< /admonition >}}
```yaml
apiVersion: 1 # config file version
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: currentuser
oauthPassThru: true
disableGrafanaCache: true
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
#### Supported cloud names
| Azure Cloud | `cloudName` Value |
| ------------------------------------ | -------------------------- |
| **Microsoft Azure public cloud** | `azuremonitor` (_Default_) |
| **Microsoft Chinese national cloud** | `chinaazuremonitor` |
| **US Government cloud** | `govazuremonitor` |
{{< admonition type="note" >}}
Cloud names for current user authentication differ from the `cloudName` values in the preceding table.
The public cloud name is `AzureCloud`, the Chinese national cloud name is `AzureChinaCloud`, and the US Government cloud name is `AzureUSGovernment`.
{{< /admonition >}}
### Configure Managed Identity
{{< admonition type="note" >}}
Managed Identity is available only in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or Grafana OSS/Enterprise when deployed in Azure. It is not available in Grafana Cloud.
{{< /admonition >}}
You can use managed identity to configure Azure Monitor in Grafana if you host Grafana in Azure (such as an App Service or with Azure Virtual Machines) and have managed identity enabled on your VM.
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
**To enable managed identity for Grafana:**
1. Set the `managed_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
```ini
[azure]
managed_identity_enabled = true
```
2. In the Azure Monitor data source configuration, set **Authentication** to **Managed Identity**.
This hides the directory ID, application ID, and client secret fields, and the data source uses managed identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Managed Identity authentication" >}}
3. You can set the `managed_identity_client_id` field in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure) to allow a user-assigned managed identity to be used instead of the default system-assigned identity.
```ini
[azure]
managed_identity_enabled = true
managed_identity_client_id = USER_ASSIGNED_IDENTITY_CLIENT_ID
```
### Configure Workload Identity
You can use workload identity to configure Azure Monitor in Grafana if you host Grafana in a Kubernetes environment, such as AKS, in conjunction with managed identities.
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
For details on workload identity, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
**To enable workload identity for Grafana:**
1. Set the `workload_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
```ini
[azure]
workload_identity_enabled = true
```
2. In the Azure Monitor data source configuration, set **Authentication** to **Workload Identity**.
This hides the directory ID, application ID, and client secret fields, and the data source uses workload identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Workload Identity authentication" >}}
3. There are additional configuration variables that can control the authentication method. `workload_identity_tenant_id` represents the Azure AD tenant that contains the managed identity, `workload_identity_client_id` represents the client ID of the managed identity if it differs from the default client ID, and `workload_identity_token_file` represents the path to the token file. Refer to the [documentation](https://azure.github.io/azure-workload-identity/docs/) for more information on what values these variables should use, if any.
```ini
[azure]
workload_identity_enabled = true
workload_identity_tenant_id = IDENTITY_TENANT_ID
workload_identity_client_id = IDENTITY_CLIENT_ID
workload_identity_token_file = TOKEN_FILE_PATH
```
### Configure Current User authentication
{{< admonition type="note" >}}
Current user authentication is an [experimental feature](/docs/release-life-cycle). Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud. Aspects of Grafana may not work as expected when using this authentication method.
{{< /admonition >}}
If your Grafana instance is configured with Azure Entra (formerly Active Directory) authentication for login, this authentication method can be used to forward the currently logged-in user's credentials to the data source. The user's credentials will then be used when requesting data from the data source. For details on how to configure your Grafana instance using Azure Entra refer to the [documentation](ref:configure-grafana-azure-auth).
{{< admonition type="note" >}}
Additional configuration is required to ensure that the App Registration used to login a user via Azure provides an access token with the permissions required by the data source.
The App Registration must be configured to issue both **Access Tokens** and **ID Tokens**.
1. In the Azure Portal, open the App Registration that requires configuration.
2. Select **Authentication** in the side menu.
3. Under **Implicit grant and hybrid flows** check both the **Access tokens** and **ID tokens** boxes.
4. Save the changes to ensure the App Registration is updated.
The App Registration must also be configured with additional **API Permissions** to provide authenticated users with access to the APIs utilised by the data source.
1. In the Azure Portal, open the App Registration that requires configuration.
1. Select **API Permissions** in the side menu.
1. Ensure the `openid`, `profile`, `email`, and `offline_access` permissions are present under the **Microsoft Graph** section. If not, they must be added.
1. Select **Add a permission** and choose the following permissions. They must be added individually. Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
- Select **Azure Service Management** > **Delegated permissions** > `user_impersonation` > **Add permissions**
- Select **APIs my organization uses** > Search for **Log Analytics API** and select it > **Delegated permissions** > `Data.Read` > **Add permissions**
Once all permissions have been added, the Azure authentication section in Grafana must be updated. The `scopes` section must be updated to include the `.default` scope to ensure that a token with access to all APIs declared on the App Registration is requested by Grafana. Once updated, the scopes value should equal: `.default openid email profile`.
{{< /admonition >}}
This method of authentication doesn't inherently support all backend functionality as a user's credentials won't be in scope.
Affected functionality includes alerting, reporting, and recorded queries.
In order to support backend queries when using a data source configured with current user authentication, you can configure service credentials.
Also, note that query and resource caching is disabled by default for data sources using current user authentication.
{{< admonition type="note" >}}
To configure fallback service credentials the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true` and `user_identity_fallback_credentials_enabled` must be enabled in the [Azure configuration section](ref:configure-grafana-azure) (enabled by default when `user_identity_enabled` is set to `true`).
{{< /admonition >}}
Permissions for fallback credentials may need to be broad to appropriately support backend functionality.
For example, an alerting query created by a user is dependent on their permissions.
If a user tries to create an alert for a resource that the fallback credentials can't access, the alert will fail.
**To enable current user authentication for Grafana:**
1. Set the `user_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
By default this will also enable fallback service credentials.
If you want to disable service credentials at the instance level set `user_identity_fallback_credentials_enabled` to false.
```ini
[azure]
user_identity_enabled = true
```
1. In the Azure Monitor data source configuration, set **Authentication** to **Current User**.
If fallback service credentials are enabled at the instance level, an additional configuration section is visible that you can use to enable or disable using service credentials for this data source.
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Current User authentication" >}}
1. If you want backend functionality to work with this data source, enable service credentials and configure the data source using the most applicable credentials for your circumstances.
## Query the data source
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
For details, see the [query editor documentation](query-editor/).
## Use template variables
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
Grafana refers to such variables as template variables.
For details, see the [template variables documentation](template-variables/).
## Application Insights and Insights Analytics (removed)
Until Grafana v8.0, you could query the same Azure Application Insights data using Application Insights and Insights Analytics.
These queries were deprecated in Grafana v7.5. In Grafana v8.0, Application Insights and Insights Analytics were made read-only in favor of querying this data through Metrics and Logs. These query methods were completely removed in Grafana v9.0.
If you're upgrading from a Grafana version prior to v9.0 and relied on Application Insights and Analytics queries, refer to the [Grafana v9.0 documentation](/docs/grafana/v9.0/datasources/azuremonitor/deprecated-application-insights/) for help migrating these queries to Metrics and Logs queries.
- [Azure Monitor documentation](https://docs.microsoft.com/en-us/azure/azure-monitor/)
- [Kusto Query Language (KQL) reference](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/)
- [Grafana community forum](https://community.grafana.com/)

View File

@@ -0,0 +1,262 @@
---
aliases:
- ../../data-sources/azure-monitor/alerting/
description: Set up alerts using Azure Monitor data in Grafana
keywords:
- grafana
- azure
- monitor
- alerting
- alerts
- metrics
- logs
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Alerting
title: Azure Monitor alerting
weight: 500
refs:
alerting:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
alerting-fundamentals:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
create-alert-rule:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
grafana-managed-recording-rules:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
configure-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
query-editor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
troubleshoot:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
---
# Azure Monitor alerting
The Azure Monitor data source supports [Grafana Alerting](ref:alerting) and [Grafana-managed recording rules](ref:grafana-managed-recording-rules), allowing you to create alert rules based on Azure metrics, logs, traces, and resource data. You can monitor your Azure environment and receive notifications when specific conditions are met.
## Before you begin
- Ensure you have the appropriate permissions to create alert rules in Grafana.
- Verify your Azure Monitor data source is configured and working correctly.
- Familiarize yourself with [Grafana Alerting concepts](ref:alerting-fundamentals).
- **Important**: Verify your data source uses a supported authentication method. Refer to [Authentication requirements](#authentication-requirements).
## Supported query types for alerting
All Azure Monitor query types support alerting and recording rules:
| Query type | Use case | Notes |
| -------------------- | -------------------------------------------------- | -------------------------------------------------------- |
| Metrics | Threshold-based alerts on Azure resource metrics | Best suited for alerting; returns time-series data |
| Logs | Alert on log patterns, error counts, or thresholds | Use KQL to aggregate data into numeric values |
| Azure Resource Graph | Alert on resource state or configuration changes | Use count aggregations to return numeric data |
| Traces | Alert on trace data and application performance | Use aggregations to return numeric values for evaluation |
{{< admonition type="note" >}}
Alert queries must return numeric data that Grafana can evaluate against a threshold. Queries that return only text or non-numeric data cannot be used directly for alerting.
{{< /admonition >}}
## Authentication requirements
Alerting and recording rules run as background processes without a user context. This means they require service-level authentication and don't work with all authentication methods.
| Authentication method | Supported |
| -------------------------------- | ------------------------------------- |
| App Registration (client secret) | ✓ |
| Managed Identity | ✓ |
| Workload Identity | ✓ |
| Current User | ✓ (with fallback service credentials) |
{{< admonition type="note" >}}
If you use **Current User** authentication, you must configure **fallback service credentials** for alerting and recording rules to function. User credentials aren't available for background operations, so Grafana uses the fallback credentials instead. Refer to [configure the data source](ref:configure-azure-monitor) for details on setting up fallback credentials.
{{< /admonition >}}
## Create an alert rule
To create an alert rule using Azure Monitor data:
1. Go to **Alerting** > **Alert rules**.
1. Click **New alert rule**.
1. Enter a name for your alert rule.
1. In the **Define query and alert condition** section:
- Select your Azure Monitor data source.
- Configure your query (for example, a Metrics query for CPU usage or a Logs query using KQL).
- Add a **Reduce** expression if your query returns multiple series.
- Add a **Threshold** expression to define the alert condition.
1. Configure the **Set evaluation behavior**:
- Select or create a folder and evaluation group.
- Set the evaluation interval (how often the alert is checked).
- Set the pending period (how long the condition must be true before firing).
1. Add labels and annotations to provide context for notifications.
1. Click **Save rule**.
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
## Example: VM CPU usage alert
This example creates an alert that fires when virtual machine CPU usage exceeds 80%:
1. Create a new alert rule.
1. Configure the query:
- **Service**: Metrics
- **Resource**: Select your virtual machine
- **Metric namespace**: `Microsoft.Compute/virtualMachines`
- **Metric**: `Percentage CPU`
- **Aggregation**: `Average`
1. Add expressions:
- **Reduce**: Last (to get the most recent data point)
- **Threshold**: Is above 80
1. Set evaluation to run every 1 minute with a 5-minute pending period.
1. Save the rule.
## Example: Error log count alert
This example alerts when error logs exceed a threshold using a KQL query:
1. Create a new alert rule.
1. Configure the query:
- **Service**: Logs
- **Resource**: Select your Log Analytics workspace
- **Query**:
```kusto
AppExceptions
| where TimeGenerated > ago(5m)
| summarize ErrorCount = count() by bin(TimeGenerated, 1m)
```
1. Add expressions:
- **Reduce**: Max (to get the highest count in the period)
- **Threshold**: Is above 10
1. Set evaluation to run every 5 minutes.
1. Save the rule.
## Example: Resource count alert
This example alerts when the number of running virtual machines drops below a threshold using Azure Resource Graph:
1. Create a new alert rule.
1. Configure the query:
- **Service**: Azure Resource Graph
- **Subscriptions**: Select your subscriptions
- **Query**:
```kusto
resources
| where type == "microsoft.compute/virtualmachines"
| where properties.extended.instanceView.powerState.displayStatus == "VM running"
| summarize RunningVMs = count()
```
1. Add expressions:
- **Reduce**: Last
- **Threshold**: Is below 3
1. Set evaluation to run every 5 minutes.
1. Save the rule.
## Best practices
Follow these recommendations to create reliable and efficient alerts with Azure Monitor data.
### Use appropriate query intervals
- Set the alert evaluation interval to be greater than or equal to the minimum data resolution from Azure Monitor.
- Azure Monitor Metrics typically have 1-minute granularity at minimum.
- Avoid very short intervals (less than 1 minute) as they may cause evaluation timeouts or miss data points.
### Reduce multiple series
When your Azure Monitor query returns multiple time series (for example, CPU usage across multiple VMs), use the **Reduce** expression to aggregate them:
- **Last**: Use the most recent value
- **Mean**: Average across all series
- **Max/Min**: Use the highest or lowest value
- **Sum**: Total across all series
### Optimize Log Analytics queries
For Logs queries used in alerting:
- Use `summarize` to aggregate data into numeric values.
- Include appropriate time filters using `ago()` or `TimeGenerated`.
- Avoid returning large result sets; aggregate data in the query.
- Test queries in Explore before using them in alert rules.
### Handle no data conditions
Configure what happens when no data is returned:
1. In the alert rule, find **Configure no data and error handling**.
1. Choose an appropriate action:
- **No Data**: Keep the alert in its current state
- **Alerting**: Treat no data as an alert condition
- **OK**: Treat no data as a healthy state
### Test queries before alerting
Always verify your query returns expected data before creating an alert:
1. Go to **Explore**.
1. Select your Azure Monitor data source.
1. Run the query you plan to use for alerting.
1. Confirm the data format and values are correct.
1. Verify the query returns numeric data suitable for threshold evaluation.
## Troubleshooting
If your Azure Monitor alerts aren't working as expected, use the following sections to diagnose and resolve common issues.
### Alerts not firing
- Verify the data source uses a supported authentication method. If using Current User authentication, ensure fallback service credentials are configured.
- Check that the query returns numeric data in Explore.
- Ensure the evaluation interval allows enough time for data to be available.
- Review the alert rule's health and any error messages in the Alerting UI.
### Authentication errors in alert evaluation
If you see authentication errors when alerts evaluate:
- Confirm the data source is configured with App Registration, Managed Identity, Workload Identity, or Current User with fallback service credentials.
- If using App Registration, verify the client secret hasn't expired.
- If using Current User, verify that fallback service credentials are configured and valid.
- Check that the service principal has appropriate permissions on Azure resources.
### Query timeout errors
- Simplify complex KQL queries.
- Reduce the time range in Log Analytics queries.
- Add more specific filters to narrow result sets.
For additional troubleshooting help, refer to [Troubleshoot Azure Monitor](ref:troubleshoot).
## Additional resources
- [Grafana Alerting documentation](ref:alerting)
- [Create alert rules](ref:create-alert-rule)
- [Azure Monitor query editor](ref:query-editor)
- [Grafana-managed recording rules](ref:grafana-managed-recording-rules)

View File

@@ -0,0 +1,218 @@
---
aliases:
- ../../data-sources/azure-monitor/annotations/
description: Use annotations with the Azure Monitor data source in Grafana
keywords:
- grafana
- azure
- monitor
- annotations
- events
- logs
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Annotations
title: Azure Monitor annotations
weight: 450
refs:
annotate-visualizations:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
query-editor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
---
# Azure Monitor annotations
[Annotations](ref:annotate-visualizations) overlay rich event information on top of graphs. You can use Azure Monitor Log Analytics queries to create annotations that mark important events, deployments, alerts, or other significant occurrences on your dashboards.
## Before you begin
- Ensure you have configured the Azure Monitor data source.
- You need access to a Log Analytics workspace containing the data you want to use for annotations.
- Annotations use Log Analytics (KQL) queries only. Metrics, Traces, and Azure Resource Graph queries aren't supported for annotations.
## Create an annotation query
To add an Azure Monitor annotation to a dashboard:
1. Open the dashboard where you want to add annotations.
1. Click **Dashboard settings** (gear icon) in the top navigation.
1. Select **Annotations** in the left menu.
1. Click **Add annotation query**.
1. Enter a **Name** for the annotation (for example, "Azure Activity" or "Deployments").
1. Select your **Azure Monitor** data source.
1. Choose the **Logs** service.
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
1. Write a KQL query that returns the annotation data.
1. Click **Apply** to save.
## Query requirements
Your KQL query should return columns that Grafana can use to create annotations:
| Column | Required | Description |
| ------------------ | ----------- | ------------------------------------------------------------------------------------------------ |
| `TimeGenerated` | Yes | The timestamp for the annotation. Grafana uses this to position the annotation on the time axis. |
| `Text` | Recommended | The annotation text displayed when you hover over or click the annotation. |
| Additional columns | Optional | Any other columns returned become annotation tags. |
{{< admonition type="note" >}}
Always include a time filter in your query to limit results to the dashboard's time range. Use the `$__timeFilter()` macro.
{{< /admonition >}}
## Annotation query examples
The following examples demonstrate common annotation use cases.
### Azure Activity Log events
Display Azure Activity Log events such as resource modifications, deployments, and administrative actions:
```kusto
AzureActivity
| where $__timeFilter(TimeGenerated)
| where Level == "Error" or Level == "Warning" or CategoryValue == "Administrative"
| project TimeGenerated, Text=OperationNameValue, Level, ResourceGroup, Caller
| order by TimeGenerated desc
| take 100
```
### Deployment events
Show deployment-related activity:
```kusto
AzureActivity
| where $__timeFilter(TimeGenerated)
| where OperationNameValue contains "deployments"
| project TimeGenerated, Text=strcat("Deployment: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
| order by TimeGenerated desc
```
### Application Insights exceptions
Mark application exceptions as annotations:
```kusto
AppExceptions
| where $__timeFilter(TimeGenerated)
| project TimeGenerated, Text=strcat(ProblemId, ": ", OuterMessage), SeverityLevel, AppRoleName
| order by TimeGenerated desc
| take 50
```
### Custom events from Application Insights
Display custom events logged by your application:
```kusto
AppEvents
| where $__timeFilter(TimeGenerated)
| where Name == "DeploymentStarted" or Name == "DeploymentCompleted"
| project TimeGenerated, Text=Name, AppRoleName
| order by TimeGenerated desc
```
### Security alerts
Show security-related alerts:
```kusto
SecurityAlert
| where $__timeFilter(TimeGenerated)
| project TimeGenerated, Text=AlertName, Severity=AlertSeverity, Description
| order by TimeGenerated desc
| take 50
```
### Resource health events
Display resource health status changes:
```kusto
AzureActivity
| where $__timeFilter(TimeGenerated)
| where CategoryValue == "ResourceHealth"
| project TimeGenerated, Text=OperationNameValue, Status=ActivityStatusValue, ResourceId
| order by TimeGenerated desc
```
### VM start and stop events
Mark virtual machine state changes:
```kusto
AzureActivity
| where $__timeFilter(TimeGenerated)
| where OperationNameValue has_any ("start", "deallocate", "restart")
| where ResourceProviderValue == "MICROSOFT.COMPUTE"
| project TimeGenerated, Text=OperationNameValue, VM=Resource, Status=ActivityStatusValue
| order by TimeGenerated desc
```
### Autoscale events
Show autoscale operations:
```kusto
AzureActivity
| where $__timeFilter(TimeGenerated)
| where OperationNameValue contains "autoscale"
| project TimeGenerated, Text=strcat("Autoscale: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
| order by TimeGenerated desc
```
## Customize annotation appearance
After creating an annotation query, you can customize its appearance:
| Setting | Description |
| ------------- | -------------------------------------------------------------------------------------------------------- |
| **Color** | Choose a color for the annotation markers. Use different colors to distinguish between annotation types. |
| **Show in** | Select which panels display the annotations. |
| **Filter by** | Add filters to limit when annotations appear. |
## Best practices
Follow these recommendations when creating annotations:
1. **Limit results**: Always use `take` or `limit` to restrict the number of annotations. Too many annotations can clutter your dashboard and impact performance.
2. **Use time filters**: Include `$__timeFilter()` to ensure queries only return data within the dashboard's time range.
3. **Create meaningful text**: Use `strcat()` or `project` to create descriptive annotation text that provides context at a glance.
4. **Add relevant tags**: Include columns like `ResourceGroup`, `Severity`, or `Status` that become clickable tags for filtering.
5. **Use descriptive names**: Name your annotations clearly (for example, "Production Deployments" or "Critical Alerts") so dashboard users understand what they represent.
## Troubleshoot annotations
If annotations aren't appearing as expected, try the following solutions.
### Annotations don't appear
- Verify the query returns data in the selected time range.
- Check that the query includes a `TimeGenerated` column.
- Test the query in the Azure Portal Log Analytics query editor.
- Ensure the annotation is enabled (toggle is on).
### Too many annotations
- Add more specific filters to your query.
- Use `take` to limit results.
- Narrow the time range.
### Annotations appear at wrong times
- Verify the `TimeGenerated` column contains the correct timestamp.
- Check your dashboard's timezone settings.

View File

@@ -0,0 +1,605 @@
---
aliases:
- ../../data-sources/azure-monitor/configure/
description: Guide for configuring the Azure Monitor data source in Grafana.
keywords:
- grafana
- microsoft
- azure
- monitor
- application
- insights
- log
- analytics
- guide
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Configure
title: Configure the Azure Monitor data source
weight: 200
last_reviewed: 2025-12-04
refs:
configure-grafana-feature-toggles:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
provisioning-data-sources:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
explore:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
configure-grafana-azure-auth:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
build-dashboards:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
configure-grafana-azure:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
data-source-management:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
configure-grafana-azure-auth-scopes:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
data-sources:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
private-data-source-connect:
- pattern: /docs/grafana/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
configure-pdc:
- pattern: /docs/grafana/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
---
# Configure the Azure Monitor data source
This document explains how to configure the Azure Monitor data source and the available configuration options.
For general information about data sources, refer to [Grafana data sources](ref:data-sources) and [Data source management](ref:data-source-management).
## Before you begin
Before configuring the Azure Monitor data source, ensure you have the following:
- **Grafana permissions:** You must have the `Organization administrator` role to configure data sources.
Organization administrators can also [configure the data source via YAML](#provision-the-data-source) with the Grafana provisioning system or [using Terraform](#configure-with-terraform).
- **Azure prerequisites:** Depending on your chosen authentication method, you may need:
- A Microsoft Entra ID (formerly Azure AD) app registration with a service principal (for App Registration authentication)
- A Managed Identity enabled on your Azure VM or App Service (for Managed Identity authentication)
- Workload identity configured in your Kubernetes cluster (for Workload Identity authentication)
- Microsoft Entra ID authentication configured for Grafana login (for Current User authentication)
{{< admonition type="note" >}}
**Grafana Cloud users:** Managed Identity and Workload Identity authentication methods are not available in Grafana Cloud because they require Grafana to run on your Azure infrastructure. Use **App Registration** authentication instead.
{{< /admonition >}}
- **Azure RBAC permissions:** The identity used to authenticate must have the `Reader` role on the Azure subscription containing the resources you want to monitor.
For Log Analytics queries, the identity also needs appropriate permissions on the Log Analytics workspaces to be queried.
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
{{< admonition type="note" >}}
The Azure Monitor data source plugin is built into Grafana. No additional installation is required.
{{< /admonition >}}
## Add the data source
To add the Azure Monitor data source:
1. Click **Connections** in the left-side menu.
1. Click **Add new connection**.
1. Type `Azure Monitor` in the search bar.
1. Select **Azure Monitor**.
1. Click **Add new data source** in the upper right.
You're taken to the **Settings** tab where you can configure the data source.
## Choose an authentication method
The Azure Monitor data source supports four authentication methods. Choose based on where Grafana is hosted and your security requirements:
| Authentication method | Best for | Requirements |
| --------------------- | ------------------------------------------ | -------------------------------------------------------------- |
| **App Registration** | Any Grafana deployment | Microsoft Entra ID app registration with client secret |
| **Managed Identity** | Grafana hosted in Azure (VMs, App Service) | Managed identity enabled on the Azure resource |
| **Workload Identity** | Grafana in Kubernetes (AKS) | Workload identity federation configured |
| **Current User** | User-level access control | Microsoft Entra ID authentication configured for Grafana login |
## Configure authentication
Select one of the following authentication methods and complete the configuration.
### App Registration
Use a Microsoft Entra ID app registration (service principal) to authenticate. This method works with any Grafana deployment.
#### App Registration prerequisites
1. Create an app registration in Microsoft Entra ID.
Refer to the [Azure documentation for creating a service principal](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
1. Create a client secret for the app registration.
Refer to the [Azure documentation for creating a client secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).
1. Assign the `Reader` role to the app registration on the subscription or resources you want to monitor.
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
#### App Registration UI configuration
| Setting | Description |
| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| **Authentication** | Select **App Registration**. |
| **Azure Cloud** | The Azure environment to connect to. Select **Azure** for the public cloud, or choose Azure Government or Azure China for national clouds. |
| **Directory (tenant) ID** | The GUID that identifies your Microsoft Entra ID tenant. |
| **Application (client) ID** | The GUID for the app registration you created. |
| **Client secret** | The secret key for the app registration. Keep this secure and rotate periodically. |
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
#### Provision App Registration with YAML
```yaml
apiVersion: 1
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: clientsecret
cloudName: azuremonitor # See supported cloud names below
tenantId: <tenant-id>
clientId: <client-id>
subscriptionId: <subscription-id> # Optional, default subscription
secureJsonData:
clientSecret: <client-secret>
version: 1
```
### Managed Identity
Use Azure Managed Identity for secure, credential-free authentication when Grafana is hosted in Azure.
{{< admonition type="note" >}}
Managed Identity is available in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or self-hosted Grafana deployed in Azure. It is not available in Grafana Cloud.
{{< /admonition >}}
#### Managed Identity prerequisites
- Grafana must be hosted in Azure (App Service, Azure VMs, or Azure Managed Grafana).
- Managed identity must be enabled on the Azure resource hosting Grafana.
- The managed identity must have the `Reader` role on the subscription or resources you want to monitor.
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
#### Managed Identity Grafana server configuration
Enable managed identity in the Grafana server configuration:
```ini
[azure]
managed_identity_enabled = true
```
To use a user-assigned managed identity instead of the system-assigned identity, also set:
```ini
[azure]
managed_identity_enabled = true
managed_identity_client_id = <USER_ASSIGNED_IDENTITY_CLIENT_ID>
```
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) for more details.
#### Managed Identity UI configuration
| Setting | Description |
| ------------------------ | --------------------------------------------------------------------------------------------------- |
| **Authentication** | Select **Managed Identity**. The directory ID, application ID, and client secret fields are hidden. |
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Managed Identity" >}}
#### Provision Managed Identity with YAML
```yaml
apiVersion: 1
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: msi
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
### Workload Identity
Use Azure Workload Identity for secure authentication in Kubernetes environments like AKS.
#### Workload Identity prerequisites
- Grafana must be running in a Kubernetes environment with workload identity federation configured.
- The workload identity must have the `Reader` role on the subscription or resources you want to monitor.
For details, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
#### Workload Identity Grafana server configuration
Enable workload identity in the Grafana server configuration:
```ini
[azure]
workload_identity_enabled = true
```
Optional configuration variables:
```ini
[azure]
workload_identity_enabled = true
workload_identity_tenant_id = <IDENTITY_TENANT_ID> # Microsoft Entra ID tenant containing the managed identity
workload_identity_client_id = <IDENTITY_CLIENT_ID> # Client ID if different from default
workload_identity_token_file = <TOKEN_FILE_PATH> # Path to the token file
```
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) and the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/) for more details.
#### Workload Identity UI configuration
| Setting | Description |
| ------------------------ | ---------------------------------------------------------------------------------------------------- |
| **Authentication** | Select **Workload Identity**. The directory ID, application ID, and client secret fields are hidden. |
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Workload Identity" >}}
#### Provision Workload Identity with YAML
```yaml
apiVersion: 1
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: workloadidentity
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
### Current User
Forward the logged-in Grafana user's Azure credentials to the data source for user-level access control.
{{< admonition type="warning" >}}
Current User authentication is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. Documentation is limited. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud.
{{< /admonition >}}
#### Current User prerequisites
Your Grafana instance must be configured with Microsoft Entra ID authentication. Refer to the [Microsoft Entra ID authentication documentation](ref:configure-grafana-azure-auth).
#### Configure your Azure App Registration
The App Registration used for Grafana login requires additional configuration:
**Enable token issuance:**
1. In the Azure Portal, open your App Registration.
1. Select **Authentication** in the side menu.
1. Under **Implicit grant and hybrid flows**, check both **Access tokens** and **ID tokens**.
1. Save your changes.
**Add API permissions:**
1. In the Azure Portal, open your App Registration.
1. Select **API Permissions** in the side menu.
1. Ensure these permissions are present under **Microsoft Graph**: `openid`, `profile`, `email`, and `offline_access`.
1. Add the following permissions:
- **Azure Service Management** > **Delegated permissions** > `user_impersonation`
- **APIs my organization uses** > Search for **Log Analytics API** > **Delegated permissions** > `Data.Read`
Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
**Update Grafana scopes:**
Update the `scopes` section in your Grafana Azure authentication configuration to include the `.default` scope:
```
.default openid email profile
```
#### Current User Grafana server configuration
Enable current user authentication in the Grafana server configuration:
```ini
[azure]
user_identity_enabled = true
```
By default, this also enables fallback service credentials. To disable fallback credentials at the instance level:
```ini
[azure]
user_identity_enabled = true
user_identity_fallback_credentials_enabled = false
```
{{< admonition type="note" >}}
To use fallback service credentials, the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true`.
{{< /admonition >}}
#### Limitations and fallback credentials
Current User authentication doesn't support backend functionality like alerting, reporting, and recorded queries because user credentials aren't available for background operations.
To support these features, configure **fallback service credentials**. When enabled, Grafana uses the fallback credentials for backend operations. Note that operations using fallback credentials are limited to the permissions of those credentials, not the user's permissions.
{{< admonition type="note" >}}
Query and resource caching is disabled by default for data sources using Current User authentication.
{{< /admonition >}}
#### Current User UI configuration
| Setting | Description |
| -------------------------------- | ------------------------------------------------------------------------------------------- |
| **Authentication** | Select **Current User**. |
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
| **Fallback Service Credentials** | Enable and configure credentials for backend features like alerting. |
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Current User authentication" >}}
#### Provision Current User with YAML
{{< admonition type="note" >}}
The `oauthPassThru` property is required for Current User authentication. The `disableGrafanaCache` property prevents returning cached responses for resources users don't have access to.
{{< /admonition >}}
```yaml
apiVersion: 1
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: currentuser
oauthPassThru: true
disableGrafanaCache: true
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
## Additional configuration options
These settings apply to all authentication methods.
### General settings
| Setting | Description |
| ----------- | ------------------------------------------------------------------------------- |
| **Name** | The data source name used in panels and queries. Example: `azure-monitor-prod`. |
| **Default** | Toggle to make this the default data source for new panels. |
### Enable Basic Logs
Toggle **Enable Basic Logs** to allow queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces.
{{< admonition type="note" >}}
Querying Basic Logs tables incurs additional costs on a per-query basis.
{{< /admonition >}}
### Private data source connect (Grafana Cloud only)
If you're using Grafana Cloud and need to connect to Azure resources in a private network, use Private Data Source Connect (PDC).
1. Click the **Private data source connect** dropdown to select your PDC configuration.
1. Click **Manage private data source connect** to view your PDC connection details.
For more information, refer to [Private data source connect](ref:private-data-source-connect) and [Configure PDC](ref:configure-pdc).
## Supported cloud names
When provisioning the data source, use the following `cloudName` values:
| Azure Cloud | `cloudName` value |
| -------------------------------- | ------------------------ |
| Microsoft Azure public cloud | `azuremonitor` (default) |
| Microsoft Chinese national cloud | `chinaazuremonitor` |
| US Government cloud | `govazuremonitor` |
{{< admonition type="note" >}}
For Current User authentication, the cloud names differ: use `AzureCloud` for public cloud, `AzureChinaCloud` for the Chinese national cloud, and `AzureUSGovernment` for the US Government cloud.
{{< /admonition >}}
## Verify the connection
After configuring the data source, click **Save & test**. A successful connection displays a message confirming that the credentials are valid and have access to the configured default subscription.
If the test fails, verify:
- Your credentials are correct (tenant ID, client ID, client secret)
- The identity has the required Azure RBAC permissions
- For Managed Identity or Workload Identity, that the Grafana server configuration is correct
- Network connectivity to Azure endpoints
## Provision the data source
You can define and configure the Azure Monitor data source in YAML files as part of the Grafana provisioning system.
For more information about provisioning, refer to [Provisioning Grafana](ref:provisioning-data-sources).
### Provision quick reference
| Authentication method | `azureAuthType` value | Required fields |
| --------------------- | --------------------- | -------------------------------------------------- |
| App Registration | `clientsecret` | `tenantId`, `clientId`, `clientSecret` |
| Managed Identity | `msi` | None (uses VM identity) |
| Workload Identity | `workloadidentity` | None (uses pod identity) |
| Current User | `currentuser` | `oauthPassThru: true`, `disableGrafanaCache: true` |
All methods support the optional `subscriptionId` field to set a default subscription.
For complete YAML examples, refer to the [authentication method sections](#configure-authentication) above.
## Configure with Terraform
You can configure the Azure Monitor data source using the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs). This approach enables infrastructure-as-code workflows and version control for your Grafana configuration.
### Terraform prerequisites
- [Terraform](https://www.terraform.io/downloads) installed
- Grafana Terraform provider configured with appropriate credentials
- For Grafana Cloud: A [Cloud Access Policy token](https://grafana.com/docs/grafana-cloud/account-management/authentication-and-permissions/access-policies/) with data source permissions
### Provider configuration
Configure the Grafana provider to connect to your Grafana instance:
```hcl
terraform {
required_providers {
grafana = {
source = "grafana/grafana"
version = ">= 2.0.0"
}
}
}
# For Grafana Cloud
provider "grafana" {
url = "<YOUR_GRAFANA_CLOUD_STACK_URL>"
auth = "<YOUR_SERVICE_ACCOUNT_TOKEN>"
}
# For self-hosted Grafana
# provider "grafana" {
# url = "http://localhost:3000"
# auth = "<API_KEY_OR_SERVICE_ACCOUNT_TOKEN>"
# }
```
### Terraform examples
The following examples show how to configure the Azure Monitor data source for each authentication method.
**App Registration (client secret):**
```hcl
resource "grafana_data_source" "azure_monitor" {
type = "grafana-azure-monitor-datasource"
name = "Azure Monitor"
json_data_encoded = jsonencode({
azureAuthType = "clientsecret"
cloudName = "azuremonitor"
tenantId = "<TENANT_ID>"
clientId = "<CLIENT_ID>"
subscriptionId = "<SUBSCRIPTION_ID>"
})
secure_json_data_encoded = jsonencode({
clientSecret = "<CLIENT_SECRET>"
})
}
```
**Managed Identity:**
```hcl
resource "grafana_data_source" "azure_monitor" {
type = "grafana-azure-monitor-datasource"
name = "Azure Monitor"
json_data_encoded = jsonencode({
azureAuthType = "msi"
subscriptionId = "<SUBSCRIPTION_ID>"
})
}
```
**Workload Identity:**
```hcl
resource "grafana_data_source" "azure_monitor" {
type = "grafana-azure-monitor-datasource"
name = "Azure Monitor"
json_data_encoded = jsonencode({
azureAuthType = "workloadidentity"
subscriptionId = "<SUBSCRIPTION_ID>"
})
}
```
**Current User:**
```hcl
resource "grafana_data_source" "azure_monitor" {
type = "grafana-azure-monitor-datasource"
name = "Azure Monitor"
json_data_encoded = jsonencode({
azureAuthType = "currentuser"
oauthPassThru = true
disableGrafanaCache = true
subscriptionId = "<SUBSCRIPTION_ID>"
})
}
```
**With Basic Logs enabled:**
Add `enableBasicLogs = true` to any of the above configurations:
```hcl
resource "grafana_data_source" "azure_monitor" {
type = "grafana-azure-monitor-datasource"
name = "Azure Monitor"
json_data_encoded = jsonencode({
azureAuthType = "clientsecret"
cloudName = "azuremonitor"
tenantId = "<TENANT_ID>"
clientId = "<CLIENT_ID>"
subscriptionId = "<SUBSCRIPTION_ID>"
enableBasicLogs = true
})
secure_json_data_encoded = jsonencode({
clientSecret = "<CLIENT_SECRET>"
})
}
```
For more information about the Grafana Terraform provider, refer to the [provider documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs) and the [grafana_data_source resource](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source).

View File

@@ -21,6 +21,7 @@ labels:
menuTitle: Query editor
title: Azure Monitor query editor
weight: 300
last_reviewed: 2025-12-04
refs:
query-transform-data-query-options:
- pattern: /docs/grafana/
@@ -32,30 +33,85 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
configure-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
explore:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
troubleshoot-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
configure-grafana-feature-toggles:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
template-variables:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
alerting-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
annotations-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
---
# Azure Monitor query editor
This topic explains querying specific to the Azure Monitor data source.
For general documentation on querying data sources in Grafana, see [Query and transform data](ref:query-transform-data).
Grafana provides a query editor for the Azure Monitor data source, which is located on the [Explore page](ref:explore). You can also access the Azure Monitor query editor from a dashboard panel. Click the menu in the upper right of the panel and select **Edit**.
## Choose a query editing mode
This document explains querying specific to the Azure Monitor data source.
For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data).
The Azure Monitor data source's query editor has three modes depending on which Azure service you want to query:
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
## Before you begin
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
- Verify your credentials have appropriate permissions for the resources you want to query.
## Key concepts
If you're new to Azure Monitor, here are some key terms used throughout this documentation:
| Term | Description |
| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **KQL (Kusto Query Language)** | The query language used for Azure Monitor Logs and Azure Resource Graph. KQL uses a pipe-based syntax similar to Unix commands and is optimized for read-only data exploration. If you know SQL, the [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet) can help you get started. |
| **Log Analytics workspace** | An Azure resource that collects and stores log data from your Azure resources, applications, and services. You query this data using KQL. |
| **Application Insights** | Azure's application performance monitoring (APM) service. It collects telemetry data like requests, exceptions, and traces from your applications. |
| **Metrics vs. Logs** | **Metrics** are lightweight numeric values collected at regular intervals (e.g., CPU percentage). **Logs** are detailed records of events with varying schemas (e.g., request logs, error messages). Metrics use a visual query builder; Logs require KQL. |
## Choose a query editor mode
The Azure Monitor data source's query editor has four modes depending on which Azure service you want to query:
- **Metrics** for [Azure Monitor Metrics](#query-azure-monitor-metrics)
- **Logs** for [Azure Monitor Logs](#query-azure-monitor-logs)
- [**Azure Resource Graph**](#query-azure-resource-graph)
- **Traces** for [Application Insights Traces](#query-application-insights-traces)
- **Azure Resource Graph** for [Azure Resource Graph](#query-azure-resource-graph)
## Query Azure Monitor Metrics
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximise availability and performance.
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximize availability and performance.
Monitor Metrics use a lightweight format that stores only numeric data in a specific structure and supports near real-time scenarios, making it useful for fast detection of issues.
In contrast, Azure Monitor Logs can store a variety of data types, each with their own structure.
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Logs Metrics sample query visualizing CPU percentage over time" >}}
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Metrics sample query visualizing CPU percentage over time" >}}
### Create a Metrics query
@@ -85,7 +141,7 @@ Optionally, you can apply further aggregations or filter by dimensions.
The available options change depending on what is relevant to the selected metric.
You can also augment queries by using [template variables](../template-variables/).
You can also augment queries by using [template variables](ref:template-variables).
### Format legend aliases
@@ -109,7 +165,7 @@ For example:
| `{{ dimensionname }}` | _(Legacy for backward compatibility)_ Replaced with the name of the first dimension. |
| `{{ dimensionvalue }}` | _(Legacy for backward compatibility)_ Replaced with the value of the first dimension. |
### Filter using dimensions
### Filter with dimensions
Some metrics also have dimensions, which associate additional metadata.
Dimensions are represented as key-value pairs assigned to each value of a metric.
@@ -121,7 +177,7 @@ For more information on multi-dimensional metrics, refer to the [Azure Monitor d
## Query Azure Monitor Logs
Azure Monitor Logs collects and organises log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
Azure Monitor Logs collects and organizes log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
While Azure Monitor Metrics stores only simplified numerical data, Logs can store different data types, each with their own structure.
You can also perform complex analysis of Logs data by using KQL.
@@ -130,6 +186,32 @@ The Azure Monitor data source also supports querying of [Basic Logs](https://lea
{{< figure src="/static/img/docs/azure-monitor/query-editor-logs.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Logs sample query comparing successful requests to failed requests" >}}
### Logs query builder (public preview)
{{< admonition type="note" >}}
The Logs query builder is a [public preview feature](/docs/release-life-cycle/). It may not be enabled in all Grafana environments.
{{< /admonition >}}
The Logs query builder provides a visual interface for building Azure Monitor Logs queries without writing KQL. This is helpful if you're new to KQL or want to quickly build simple queries.
**To enable the Logs query builder:**
1. Enable the `azureMonitorLogsBuilderEditor` [feature toggle](ref:configure-grafana-feature-toggles) in your Grafana configuration.
1. Restart Grafana for the change to take effect.
**To switch between Builder and Code modes:**
When the feature is enabled, a **Builder / Code** toggle appears in the Logs query editor:
- **Builder**: Use the visual interface to select tables, columns, filters, and aggregations. The builder generates the KQL query for you.
- **Code**: Write KQL queries directly. Use this mode for complex queries that require full KQL capabilities.
New queries default to Builder mode. Existing queries that were created with raw KQL remain in Code mode.
{{< admonition type="note" >}}
You can switch from Builder to Code mode at any time to view or edit the generated KQL. However, switching from Code to Builder mode may not preserve complex queries that can't be represented in the builder interface.
{{< /admonition >}}
### Create a Logs query
**To create a Logs query:**
@@ -140,13 +222,13 @@ The Azure Monitor data source also supports querying of [Basic Logs](https://lea
Alternatively, you can dynamically query all resources under a single resource group or subscription.
{{< admonition type="note" >}}
If a timespan is specified in the query, the overlap of the timespan between the query and the dashboard will be used as the query timespan. See the [API documentation for
If a time span is specified in the query, the overlap between the query time span and the dashboard time range will be used. See the [API documentation for
details.](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters)
{{< /admonition >}}
1. Enter your KQL query.
You can also augment queries by using [template variables](../template-variables/).
You can also augment queries by using [template variables](ref:template-variables).
**To create a Basic Logs query:**
@@ -161,7 +243,7 @@ You can also augment queries by using [template variables](../template-variables
{{< /admonition >}}
1. Enter your KQL query.
You can also augment queries by using [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/).
You can also augment queries by using [template variables](ref:template-variables).
### Logs query examples
@@ -174,24 +256,28 @@ The Azure documentation includes resources to help you learn KQL:
- [Tutorial: Use Kusto queries in Azure Monitor](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/tutorial?pivots=azuremonitor)
- [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet)
> **Time-range:** The time-range that will be used for the query can be modified via the time-range switch. Selecting `Query` will only make use of time-ranges specified within the query.
> Specifying `Dashboard` will only make use of the Grafana time-range.
> If there are no time-ranges specified within the query, the default Log Analytics time-range will apply.
> For more details on this change, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters).
> If the `Intersection` option was previously chosen it will be migrated by default to `Dashboard`.
{{< admonition type="note" >}}
**Time-range:** The time-range used for the query can be modified via the time-range switch:
This example query returns a virtual machine's CPU performance, averaged over 5-minute time grains:
- Selecting **Query** uses only time-ranges specified within the query.
- Selecting **Dashboard** uses only the Grafana dashboard time-range.
- If no time-range is specified in the query, the default Log Analytics time-range applies.
For more details, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters). If you previously used the `Intersection` option, it has been migrated to `Dashboard`.
{{< /admonition >}}
This example query returns a virtual machine's CPU performance, averaged over 5-minute time grains:
```kusto
Perf
# $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
// $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
| where $__timeFilter(TimeGenerated)
| where CounterName == "% Processor Time"
| summarize avg(CounterValue) by bin(TimeGenerated, 5m), Computer
| order by TimeGenerated asc
```
Use time series queries for values that change over time, usually for graph visualisations such as the Time series panel.
Use time series queries for values that change over time, usually for graph visualizations such as the Time series panel.
Each query should return at least a datetime column and numeric value column.
The result must also be sorted in ascending order by the datetime column.
@@ -357,21 +443,33 @@ Application Insights stores trace data in an underlying Log Analytics workspace
This query type only supports Application Insights resources.
{{< /admonition >}}
Running a query of this kind will return all trace data within the timespan specified by the panel/dashboard.
1. (Optional) Specify an **Operation ID** value to filter traces.
1. (Optional) Specify **event types** to filter by.
1. (Optional) Specify **event properties** to filter by.
1. (Optional) Change the **Result format** to switch between tabular format and trace format.
Optionally, you can apply further filtering or select a specific Operation ID to query. The result format can also be switched between a tabular format or the trace format which will return the data in a format that can be used with the Trace visualization.
{{< admonition type="note" >}}
Selecting the trace format filters events to only the `trace` type. Use this format with the Trace visualization.
{{< /admonition >}}
{{< admonition type="note" >}}
Selecting the trace format will filter events with the `trace` type.
{{< /admonition >}}
Running a query returns all trace data within the time range specified by the panel or dashboard.
1. Specify an Operation ID value.
1. Specify event types to filter by.
1. Specify event properties to filter by.
You can also augment queries by using [template variables](ref:template-variables).
You can also augment queries by using [template variables](../template-variables/).
## Use queries for alerting and recording rules
## Working with large Azure resource data sets
All Azure Monitor query types (Metrics, Logs, Azure Resource Graph, and Traces) can be used with Grafana Alerting and recording rules.
For detailed information about creating alert rules, supported query types, authentication requirements, and examples, refer to [Azure Monitor alerting](ref:alerting-azure-monitor).
## Work with large Azure resource datasets
If a request exceeds the [maximum allowed value of records](https://docs.microsoft.com/en-us/azure/governance/resource-graph/concepts/work-with-data#paging-results), the result is paginated and only the first page of results is returned.
You can use filters to reduce the number of records returned below that value.
## Next steps
- [Use template variables](../template-variables/) to create dynamic, reusable dashboards
- [Add annotations](ref:annotations-azure-monitor) to overlay events on your graphs
- [Set up alerting](ref:alerting-azure-monitor) to create alert rules based on Azure Monitor data
- [Troubleshoot](ref:troubleshoot-azure-monitor) common query and configuration issues

View File

@@ -23,6 +23,7 @@ labels:
menuTitle: Template variables
title: Azure Monitor template variables
weight: 400
last_reviewed: 2025-12-04
refs:
variables:
- pattern: /docs/grafana/
@@ -34,6 +35,11 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
configure-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
---
# Azure Monitor template variables
@@ -42,58 +48,173 @@ Instead of hard-coding details such as resource group or resource name values in
This helps you create more interactive, dynamic, and reusable dashboards.
Grafana refers to such variables as template variables.
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables).
## Use query variables
## Before you begin
You can specify these Azure Monitor data source queries in the Variable edit view's **Query Type** field.
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
- If you want template variables to auto-populate subscriptions, set a **Default Subscription** in the data source configuration.
| Name | Description |
| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
| **Subscriptions** | Returns subscriptions. |
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value. |
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is provided, only the namespaces within that group are returned. |
| **Regions** | Returns regions for the specified subscription. |
| **Resource Names** | Returns a list of resource names for a specified subscription, resource group, and namespace. Supports multi-value. |
| **Metric Names** | Returns a list of metric names for a resource. |
| **Workspaces** | Returns a list of workspaces for the specified subscription. |
| **Logs** | Use a KQL query to return values. |
| **Custom Namespaces** | Returns metric namespaces for the specified resource. |
| **Custom Metric Names** | Returns a list of custom metric names for the specified resource. |
## Create a template variable
To create a template variable for Azure Monitor:
1. Open the dashboard where you want to add the variable.
1. Click **Dashboard settings** (gear icon) in the top navigation.
1. Select **Variables** in the left menu.
1. Click **Add variable**.
1. Enter a **Name** for your variable (e.g., `subscription`, `resourceGroup`, `resource`).
1. In the **Type** dropdown, select **Query**.
1. In the **Data source** dropdown, select your Azure Monitor data source.
1. In the **Query Type** dropdown, select the appropriate query type (see [Available query types](#available-query-types)).
1. Configure any additional fields required by the selected query type.
1. Click **Run query** to preview the variable values.
1. Configure display options such as **Multi-value** or **Include All option** as needed.
1. Click **Apply** to save the variable.
## Available query types
The Azure Monitor data source provides the following query types for template variables:
| Query type | Description |
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- |
| **Subscriptions** | Returns a list of Azure subscriptions accessible to the configured credentials. |
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value selection. |
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is specified, returns only namespaces within that group. |
| **Regions** | Returns Azure regions available for the specified subscription. |
| **Resource Names** | Returns resource names for a specified subscription, resource group, and namespace. Supports multi-value selection. |
| **Metric Names** | Returns available metric names for a specified resource. |
| **Workspaces** | Returns Log Analytics workspaces for the specified subscription. |
| **Logs** | Executes a KQL query and returns the results as variable values. See [Create a Logs variable](#create-a-logs-variable). |
| **Custom Namespaces** | Returns custom metric namespaces for a specified resource. |
| **Custom Metric Names** | Returns custom metric names for a specified resource. |
{{< admonition type="note" >}}
Custom metrics cannot be emitted against a subscription or resource group. Select resources only when you need to retrieve custom metric namespaces or custom metric names associated with a specific resource.
Custom metrics cannot be emitted against a subscription or resource group. Select specific resources when retrieving custom metric namespaces or custom metric names.
{{< /admonition >}}
You can use any Log Analytics Kusto Query Language (KQL) query that returns a single list of values in the `Query` field.
For example:
## Create cascading variables
| Query | List of values returned |
| ----------------------------------------------------------------------------------------- | --------------------------------------- |
| `workspace("myWorkspace").Heartbeat \| distinct Computer` | Virtual machines |
| `workspace("$workspace").Heartbeat \| distinct Computer` | Virtual machines with template variable |
| `workspace("$workspace").Perf \| distinct ObjectName` | Objects from the Perf table |
| `workspace("$workspace").Perf \| where ObjectName == "$object"` `\| distinct CounterName` | Metric names from the Perf table |
Cascading variables (also called dependent or chained variables) allow you to create dropdown menus that filter based on previous selections. This is useful for drilling down from subscription to resource group to specific resource.
### Query variable example
### Example: Subscription → Resource Group → Resource Name
This time series query uses query variables:
**Step 1: Create a Subscription variable**
1. Create a variable named `subscription`.
1. Set **Query Type** to **Subscriptions**.
**Step 2: Create a Resource Group variable**
1. Create a variable named `resourceGroup`.
1. Set **Query Type** to **Resource Groups**.
1. In the **Subscription** field, select `$subscription`.
**Step 3: Create a Resource Name variable**
1. Create a variable named `resource`.
1. Set **Query Type** to **Resource Names**.
1. In the **Subscription** field, select `$subscription`.
1. In the **Resource Group** field, select `$resourceGroup`.
1. Select the appropriate **Namespace** for your resources (e.g., `Microsoft.Compute/virtualMachines`).
Now when you change the subscription, the resource group dropdown updates automatically, and when you change the resource group, the resource name dropdown updates.
## Create a Logs variable
The **Logs** query type lets you use a KQL query to populate variable values. The query must return a single column of values.
**To create a Logs variable:**
1. Create a new variable with **Query Type** set to **Logs**.
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
1. Enter a KQL query that returns a single column.
### Logs variable query examples
| Query | Returns |
| ----------------------------------------- | ------------------------------------- |
| `Heartbeat \| distinct Computer` | List of virtual machine names |
| `Perf \| distinct ObjectName` | List of performance object names |
| `AzureActivity \| distinct ResourceGroup` | List of resource groups with activity |
| `AppRequests \| distinct Name` | List of application request names |
You can reference other variables in your Logs query:
```kusto
workspace("$workspace").Heartbeat | distinct Computer
```
```kusto
workspace("$workspace").Perf
| where ObjectName == "$object"
| distinct CounterName
```
## Variable refresh options
Control when your variables refresh by setting the **Refresh** option:
| Option | Behavior |
| ------------------------ | ----------------------------------------------------------------------------------------- |
| **On dashboard load** | Variables refresh each time the dashboard loads. Best for data that changes infrequently. |
| **On time range change** | Variables refresh when the dashboard time range changes. Use for time-sensitive queries. |
For dashboards with many variables or complex queries, use **On dashboard load** to improve performance.
## Use variables in queries
After you create template variables, you can use them in your Azure Monitor queries by referencing them with the `$` prefix.
### Metrics query example
In a Metrics query, select your variables in the resource picker fields:
- **Subscription**: `$subscription`
- **Resource Group**: `$resourceGroup`
- **Resource Name**: `$resource`
### Logs query example
Reference variables directly in your KQL queries:
```kusto
Perf
| where ObjectName == "$object" and CounterName == "$metric"
| where TimeGenerated >= $__timeFrom() and TimeGenerated <= $__timeTo()
| where $__contains(Computer, $computer)
| summarize avg(CounterValue) by bin(TimeGenerated, $__interval), Computer
| order by TimeGenerated asc
```
### Multi-value variables
## Multi-value variables
It is possible to select multiple values for **Resource Groups** and **Resource Names** and use a single metrics query pointing to those values as long as they:
You can enable **Multi-value** selection for **Resource Groups** and **Resource Names** variables. When using multi-value variables in a Metrics query, all selected resources must:
- Belong to the same subscription.
- Are in the same region.
- Are of the same type (namespace).
- Belong to the same subscription
- Be in the same Azure region
- Be of the same resource type (namespace)
Also, note that if a template variable pointing to multiple resource groups or names is used in another template variable as a parameter (e.g. to retrieve metric names), only the first value will be used. This means that the combination of the first resource group and name selected should be valid.
{{< admonition type="note" >}}
When a multi-value variable is used as a parameter in another variable query (for example, to retrieve metric names), only the first selected value is used. Ensure the first resource group and resource name combination is valid.
{{< /admonition >}}
## Troubleshoot template variables
If you encounter issues with template variables, try the following solutions.
### Variable returns no values
- Verify the Azure Monitor data source is configured correctly and can connect to Azure.
- Check that the credentials have appropriate permissions to list the requested resources.
- For cascading variables, ensure parent variables have valid selections.
### Variable values are outdated
- Check the **Refresh** setting and adjust if needed.
- Click the refresh icon next to the variable dropdown to manually refresh.
### Multi-value selection not working in queries
- Ensure the resources meet the requirements (same subscription, region, and type).
- For Logs queries, use the `$__contains()` macro to handle multi-value variables properly.

View File

@@ -0,0 +1,320 @@
---
aliases:
- ../../data-sources/azure-monitor/troubleshooting/
description: Troubleshooting guide for the Azure Monitor data source in Grafana
keywords:
- grafana
- azure
- monitor
- troubleshooting
- errors
- authentication
- query
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Troubleshoot
title: Troubleshoot Azure Monitor data source issues
weight: 500
last_reviewed: 2025-12-04
refs:
configure-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
template-variables:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
query-editor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
---
# Troubleshoot Azure Monitor data source issues
This document provides solutions to common issues you may encounter when configuring or using the Azure Monitor data source.
## Configuration and authentication errors
These errors typically occur when setting up the data source or when authentication credentials are invalid.
### "Authorization failed" or "Access denied"
**Symptoms:**
- Save & test fails with "Authorization failed"
- Queries return "Access denied" errors
- Subscriptions don't load when you click **Load Subscriptions**
**Possible causes and solutions:**
| Cause | Solution |
| -------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| App registration doesn't have required permissions | Assign the `Reader` role to the app registration on the subscription or resource group you want to monitor. Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current). |
| Incorrect tenant ID, client ID, or client secret | Verify the credentials in the Azure Portal under **App registrations** > your app > **Overview** (for IDs) and **Certificates & secrets** (for secret). |
| Client secret has expired | Create a new client secret in Azure and update the data source configuration. |
| Managed Identity not enabled on the Azure resource | For VMs, enable managed identity in the Azure Portal under **Identity**. For App Service, enable it under **Identity** in the app settings. |
| Managed Identity not assigned the Reader role | Assign the `Reader` role to the managed identity on the target subscription or resources. |
### "Invalid client secret" or "Client secret not found"
**Symptoms:**
- Authentication fails immediately after configuration
- Error message references invalid credentials
**Solutions:**
1. Ensure you copied the client secret **value**, not the secret ID. In Azure Portal under **Certificates & secrets**, the secret value is only shown once when created. The secret ID is a different identifier and won't work for authentication.
2. Verify the client secret was copied correctly (no extra spaces or truncation).
3. Check if the secret has expired in Azure Portal under **App registrations** > your app > **Certificates & secrets**.
4. Create a new secret and update the data source configuration.
### "Tenant not found" or "Invalid tenant ID"
**Symptoms:**
- Data source test fails with tenant-related errors
- Unable to authenticate
**Solutions:**
1. Verify the Directory (tenant) ID in Azure Portal under **Microsoft Entra ID** > **Overview**.
2. Ensure you're using the correct Azure cloud setting (Azure, Azure Government, or Azure China).
3. Check that the tenant ID is a valid GUID format.
### Managed Identity not working
**Symptoms:**
- Managed Identity option is available but authentication fails
- Error: "Managed identity authentication is not available"
**Solutions:**
1. Verify `managed_identity_enabled = true` is set in the Grafana server configuration under `[azure]`.
2. Confirm the Azure resource hosting Grafana has managed identity enabled.
3. For user-assigned managed identity, ensure `managed_identity_client_id` is set correctly.
4. Verify the managed identity has the `Reader` role on the target resources.
5. Restart Grafana after changing server configuration.
### Workload Identity not working
**Symptoms:**
- Workload Identity authentication fails in Kubernetes/AKS environment
- Token file errors
**Solutions:**
1. Verify `workload_identity_enabled = true` is set in the Grafana server configuration.
2. Check that the service account is correctly annotated for workload identity.
3. Verify the federated credential is configured in Azure.
4. Ensure the token path is accessible to the Grafana pod.
5. Check the workload identity webhook is running in the cluster.
## Query errors
These errors occur when executing queries against Azure Monitor services.
### "No data" or empty results
**Symptoms:**
- Query executes without error but returns no data
- Charts show "No data" message
**Possible causes and solutions:**
| Cause | Solution |
| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
| Time range doesn't contain data | Expand the dashboard time range or verify data exists in Azure Portal. |
| Wrong resource selected | Verify you've selected the correct subscription, resource group, and resource. |
| Metric not available for resource | Not all metrics are available for all resources. Check available metrics in Azure Portal under the resource's **Metrics** blade. |
| Metric has no values | Some metrics only populate under certain conditions (e.g., error counts when errors occur). |
| Permissions issue | Verify the identity has read access to the specific resource. |
### "Bad request" or "Invalid query"
**Symptoms:**
- Query fails with 400 error
- Error message indicates query syntax issues
**Solutions for Logs queries:**
1. Validate your KQL syntax in the Azure Portal Log Analytics query editor.
2. Check for typos in table names or column names.
3. Ensure referenced tables exist in the selected workspace.
4. Verify the time range is valid (not in the future, not too far in the past for data retention).
**Solutions for Metrics queries:**
1. Verify the metric name is valid for the selected resource type.
2. Check that dimension filters use valid dimension names and values.
3. Ensure the aggregation type is supported for the selected metric.
### "Resource not found"
**Symptoms:**
- Query fails with 404 error
- Resource picker shows resources that can't be queried
**Solutions:**
1. Verify the resource still exists in Azure (it may have been deleted or moved).
2. Check that the subscription is correct.
3. Refresh the resource picker by re-selecting the subscription.
4. Verify the identity has access to the resource's resource group.
### Logs query timeout
**Symptoms:**
- Query runs for a long time then fails
- Error mentions timeout or query limits
**Solutions:**
1. Narrow the time range to reduce data volume.
2. Add filters to reduce the result set.
3. Use `summarize` to aggregate data instead of returning raw rows.
4. Consider using Basic Logs for large datasets (if enabled).
5. Break complex queries into smaller parts.
### "Metrics not available" for a resource
**Symptoms:**
- Resource appears in picker but no metrics are listed
- Metric dropdown is empty
**Solutions:**
1. Verify the resource type supports Azure Monitor metrics.
2. Check if the resource is in a region that supports metrics.
3. Some resources require diagnostic settings to emit metrics—configure these in Azure Portal.
4. Try selecting a different namespace for the resource.
## Azure Resource Graph errors
These errors are specific to Azure Resource Graph (ARG) queries.
### "Query execution failed"
**Symptoms:**
- ARG query fails with execution errors
- Results don't match expected resources
**Solutions:**
1. Validate query syntax in Azure Portal Resource Graph Explorer.
2. Check that you have access to the subscriptions being queried.
3. Verify table names are correct (e.g., `Resources`, `ResourceContainers`).
4. Some ARG features require specific permissions. Check the [ARG documentation](https://docs.microsoft.com/en-us/azure/governance/resource-graph/).
### Query returns incomplete results
**Symptoms:**
- Not all expected resources appear in results
- Results seem truncated
**Solutions:**
1. ARG queries are paginated. The data source handles pagination automatically, but very large result sets may be limited.
2. Add filters to reduce result set size.
3. Verify you have access to all subscriptions containing the resources.
## Application Insights Traces errors
These errors are specific to the Traces query type.
### "No traces found"
**Symptoms:**
- Trace query returns empty results
- Operation ID search finds nothing
**Solutions:**
1. Verify the Application Insights resource is collecting trace data.
2. Check that the time range includes when the traces were generated.
3. Ensure the Operation ID is correct (copy directly from another trace or log).
4. Verify the identity has access to the Application Insights resource.
## Template variable errors
For detailed troubleshooting of template variables, refer to the [template variables troubleshooting section](ref:template-variables).
### Variables return no values
**Solutions:**
1. Verify the data source connection is working (test it in the data source settings).
2. Check that parent variables (for cascading variables) have valid selections.
3. Verify the identity has permissions to list the requested resources.
4. For Logs variables, ensure the KQL query returns a single column.
### Variables are slow to load
**Solutions:**
1. Set variable refresh to **On dashboard load** instead of **On time range change**.
2. Reduce the scope of variable queries (e.g., filter by resource group instead of entire subscription).
3. For Logs variables, optimize the KQL query to return results faster.
## Connection and network errors
These errors indicate problems with network connectivity between Grafana and Azure services.
### "Connection refused" or timeout errors
**Symptoms:**
- Data source test fails with network errors
- Queries timeout without returning results
**Solutions:**
1. Verify network connectivity from Grafana to Azure endpoints.
2. Check firewall rules allow outbound HTTPS (port 443) to Azure services.
3. For private networks, ensure Private Link or VPN is configured correctly.
4. For Grafana Cloud, configure [Private Data Source Connect](ref:configure-azure-monitor) if accessing private resources.
### SSL/TLS certificate errors
**Symptoms:**
- Certificate validation failures
- SSL handshake errors
**Solutions:**
1. Ensure the system time is correct (certificate validation fails with incorrect time).
2. Verify corporate proxy isn't intercepting HTTPS traffic.
3. Check that required CA certificates are installed on the Grafana server.
## Get additional help
If you've tried the solutions above and still encounter issues:
1. Check the [Grafana community forums](https://community.grafana.com/) for similar issues.
1. Review the [Azure Monitor data source GitHub issues](https://github.com/grafana/grafana/issues) for known bugs.
1. Enable debug logging in Grafana to capture detailed error information.
1. Contact Grafana Support if you're an Enterprise, Cloud Pro, or Cloud Contracted user.
1. When reporting issues, include:
- Grafana version
- Error messages (redact sensitive information)
- Steps to reproduce
- Relevant configuration (redact credentials)

View File

@@ -52,6 +52,7 @@ The following documents will help you get started with the InfluxDB data source
- [Configure the InfluxDB data source](./configure-influxdb-data-source/)
- [InfluxDB query editor](./query-editor/)
- [InfluxDB templates and variables](./template-variables/)
- [Troubleshoot issues with the InfluxDB data source](./troubleshooting/)
Once you have configured the data source you can:

View File

@@ -0,0 +1,291 @@
---
aliases:
- ../../data-sources/influxdb/troubleshooting/
description: Troubleshooting the InfluxDB data source in Grafana
keywords:
- grafana
- influxdb
- troubleshooting
- errors
- flux
- influxql
- sql
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Troubleshooting
title: Troubleshoot issues with the InfluxDB data source
weight: 600
---
# Troubleshoot issues with the InfluxDB data source
This document provides troubleshooting information for common errors you may encounter when using the InfluxDB data source in Grafana.
## Connection errors
The following errors occur when Grafana cannot establish or maintain a connection to InfluxDB.
### Failed to connect to InfluxDB
**Error message:** "error performing influxQL query" or "error performing flux query" or "error performing sql query"
**Cause:** Grafana cannot establish a network connection to the InfluxDB server.
**Solution:**
1. Verify that the InfluxDB URL is correct in the data source configuration.
1. Check that InfluxDB is running and accessible from the Grafana server.
1. Ensure the URL includes the protocol (`http://` or `https://`).
1. Verify the port is correct (the InfluxDB default API port is `8086`).
1. Ensure there are no firewall rules blocking the connection.
1. For Grafana Cloud, ensure you have configured [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your InfluxDB instance is not publicly accessible.
### Request timed out
**Error message:** "context deadline exceeded" or "request timeout"
**Cause:** The connection to InfluxDB timed out before receiving a response.
**Solution:**
1. Check the network latency between Grafana and InfluxDB.
1. Verify that InfluxDB is not overloaded or experiencing performance issues.
1. Increase the timeout setting in the data source configuration under **Advanced HTTP Settings**.
1. Reduce the time range or complexity of your query.
## Authentication errors
The following errors occur when there are issues with authentication credentials or permissions.
### Unauthorized (401)
**Error message:** "401 Unauthorized" or "authorization failed"
**Cause:** The authentication credentials are invalid or missing.
**Solution:**
1. Verify that the token or password is correct in the data source configuration.
1. For Flux and SQL, ensure the token has not expired.
1. For InfluxQL with InfluxDB 2.x, verify the token is set as an `Authorization` header with the value `Token <your-token>`.
1. For InfluxDB 1.x, verify the username and password are correct.
1. Check that the token has the required permissions to access the specified bucket or database.
### Forbidden (403)
**Error message:** "403 Forbidden" or "access denied"
**Cause:** The authenticated user or token does not have permission to access the requested resource.
**Solution:**
1. Verify the token has read access to the specified bucket or database.
1. Check the token's permissions in the InfluxDB UI under **API Tokens**.
1. Ensure the organization ID is correct for Flux queries.
1. For InfluxQL with InfluxDB 2.x, verify the DBRP mapping is configured correctly.
## Configuration errors
The following errors occur when the data source is not configured correctly.
### Unknown influx version
**Error message:** "unknown influx version"
**Cause:** The query language is not properly configured in the data source settings.
**Solution:**
1. Open the data source configuration in Grafana.
1. Verify that a valid query language is selected: **Flux**, **InfluxQL**, or **SQL**.
1. Ensure the selected query language matches your InfluxDB version:
- Flux: InfluxDB 1.8+ and 2.x
- InfluxQL: InfluxDB 1.x and 2.x (with DBRP mapping)
- SQL: InfluxDB 3.x only
### Invalid data source info received
**Error message:** "invalid data source info received"
**Cause:** The data source configuration is incomplete or corrupted.
**Solution:**
1. Delete and recreate the data source.
1. Ensure all required fields are populated based on your query language:
- **Flux:** URL, Organization, Token, Default Bucket
- **InfluxQL:** URL, Database, User, Password
- **SQL:** URL, Database, Token
### DBRP mapping required
**Error message:** "database not found" or queries return no data with InfluxQL on InfluxDB 2.x
**Cause:** InfluxQL queries on InfluxDB 2.x require a Database and Retention Policy (DBRP) mapping.
**Solution:**
1. Create a DBRP mapping in InfluxDB using the CLI or API.
1. Refer to [Manage DBRP Mappings](https://docs.influxdata.com/influxdb/cloud/query-data/influxql/dbrp/) for guidance.
1. Verify the database name in Grafana matches the DBRP mapping.
## Query errors
The following errors occur when there are issues with query syntax or execution.
### Query syntax error
**Error message:** "error parsing query: found THING" or "failed to parse query: found WERE, expected ; at line 1, char 38"
**Cause:** The query contains invalid syntax.
**Solution:**
1. Check your query syntax for typos or invalid keywords.
1. For InfluxQL, verify the query follows the correct syntax:
```sql
SELECT <field> FROM <measurement> WHERE <condition>
```
1. For Flux, ensure proper pipe-forward syntax and function calls.
1. Use the InfluxDB UI or CLI to test your query directly.
### Query timeout limit exceeded
**Error message:** "query-timeout limit exceeded"
**Cause:** The query took longer than the configured timeout limit in InfluxDB.
**Solution:**
1. Reduce the time range of your query.
1. Add more specific filters to limit the data scanned.
1. Increase the query timeout setting in InfluxDB if you have admin access.
1. Optimize your query to reduce complexity.
### Too many series or data points
**Error message:** "max-series-per-database limit exceeded" or "A query returned too many data points and the results have been truncated"
**Cause:** The query is returning more data than the configured limits allow.
**Solution:**
1. Reduce the time range of your query.
1. Add filters to limit the number of series returned.
1. Increase the **Max series** setting in the data source configuration under **Advanced Database Settings**.
1. Use aggregation functions to reduce the number of data points.
1. For Flux, use `aggregateWindow()` to downsample data.
### No time column found
**Error message:** "no time column found"
**Cause:** The query result does not include a time column, which is required for time series visualization.
**Solution:**
1. Ensure your query includes a time field.
1. For Flux, verify the query includes `_time` in the output.
1. For SQL, ensure the query returns a timestamp column.
1. Check that the time field is not being filtered out or excluded.
## Health check errors
The following errors occur when testing the data source connection.
### Error getting flux query buckets
**Error message:** "error getting flux query buckets"
**Cause:** The health check query `buckets()` failed to return results.
**Solution:**
1. Verify the token has permission to list buckets.
1. Check that the organization ID is correct.
1. Ensure InfluxDB is running and accessible.
### Error connecting InfluxDB influxQL
**Error message:** "error connecting InfluxDB influxQL"
**Cause:** The health check query `SHOW MEASUREMENTS` failed.
**Solution:**
1. Verify the database name is correct.
1. Check that the user has permission to run `SHOW MEASUREMENTS`.
1. Ensure the database exists and contains measurements.
1. For InfluxDB 2.x, verify DBRP mapping is configured.
### 0 measurements found
**Error message:** "data source is working. 0 measurements found"
**Cause:** The connection is successful, but the database contains no measurements.
**Solution:**
1. Verify you are connecting to the correct database.
1. Check that data has been written to the database.
1. If the database is new, add some test data to verify the connection.
## Other common issues
The following issues don't produce specific error messages but are commonly encountered.
### Empty query results
**Cause:** The query returns no data.
**Solution:**
1. Verify the time range includes data in your database.
1. Check that the measurement and field names are correct.
1. Test the query directly in the InfluxDB UI or CLI.
1. Ensure filters are not excluding all data.
1. For InfluxQL, verify the retention policy contains data for the selected time range.
### Slow query performance
**Cause:** Queries take a long time to execute.
**Solution:**
1. Reduce the time range of your query.
1. Add more specific filters to limit the data scanned.
1. Increase the **Min time interval** setting to reduce the number of data points.
1. Check InfluxDB server performance and resource utilization.
1. For Flux, use `aggregateWindow()` to downsample data before visualization.
1. Consider using continuous queries or tasks to pre-aggregate data.
### Data appears delayed or missing recent points
**Cause:** The visualization doesn't show the most recent data.
**Solution:**
1. Check the dashboard time range and refresh settings.
1. Verify the **Min time interval** is not set too high.
1. Ensure InfluxDB has finished writing the data.
1. Check for clock synchronization issues between Grafana and InfluxDB.
## Get additional help
If you continue to experience issues after following this troubleshooting guide:
1. Check the [InfluxDB documentation](https://docs.influxdata.com/) for API-specific guidance.
1. Review the [Grafana community forums](https://community.grafana.com/) for similar issues.
1. Contact Grafana Support if you're an Enterprise, Cloud Pro, or Cloud Contracted user.
1. When reporting issues, include:
- Grafana version
- InfluxDB version and product (OSS, Cloud, Enterprise)
- Query language (Flux, InfluxQL, or SQL)
- Error messages (redact sensitive information)
- Steps to reproduce
- Relevant configuration such as data source settings, HTTP method, and TLS settings (redact tokens, passwords, and other credentials)

View File

@@ -60,6 +60,7 @@ The following documents will help you get started with the PostgreSQL data sourc
- [Configure the PostgreSQL data source](ref:configure-postgres-data-source)
- [PostgreSQL query editor](ref:postgres-query-editor)
- [Troubleshooting](troubleshooting/)
After you have configured the data source you can:

View File

@@ -0,0 +1,380 @@
---
aliases:
- ../../data-sources/postgres/troubleshooting/
description: Troubleshooting the PostgreSQL data source in Grafana
keywords:
- grafana
- postgresql
- troubleshooting
- errors
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Troubleshooting
title: Troubleshoot issues with the PostgreSQL data source
weight: 600
---
# Troubleshoot issues with the PostgreSQL data source
This document provides troubleshooting information for common errors you may encounter when using the PostgreSQL data source in Grafana.
## Connection errors
The following errors occur when Grafana cannot establish or maintain a connection to PostgreSQL.
### Failed to connect to PostgreSQL
**Error message:** "pq: connection refused" or "dial tcp: connect: connection refused"
**Cause:** Grafana cannot establish a network connection to the PostgreSQL server.
**Solution:**
1. Verify that the Host URL is correct in the data source configuration.
1. Check that PostgreSQL is running and accessible from the Grafana server.
1. Verify the port is correct (the PostgreSQL default port is `5432`).
1. Ensure there are no firewall rules blocking the connection.
1. Check that PostgreSQL is configured to accept connections from the Grafana server in `pg_hba.conf`.
1. For Grafana Cloud, ensure you have configured [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your PostgreSQL instance is not publicly accessible.
### Request timed out
**Error message:** "context deadline exceeded" or "i/o timeout"
**Cause:** The connection to PostgreSQL timed out before receiving a response.
**Solution:**
1. Check the network latency between Grafana and PostgreSQL.
1. Verify that PostgreSQL is not overloaded or experiencing performance issues.
1. Increase the **Max lifetime** setting in the data source configuration under **Connection limits**.
1. Reduce the time range or complexity of your query.
1. Check if any network devices (load balancers, proxies) are timing out the connection.
### Host not found
**Error message:** "pq: no such host" or "lookup hostname: no such host"
**Cause:** The hostname specified in the data source configuration cannot be resolved.
**Solution:**
1. Verify the hostname is spelled correctly.
1. Check that DNS resolution is working on the Grafana server.
1. Try using an IP address instead of a hostname.
1. Ensure the PostgreSQL server is accessible from the Grafana server's network.
## Authentication errors
The following errors occur when there are issues with authentication credentials or permissions.
### Password authentication failed
**Error message:** "pq: password authentication failed for user"
**Cause:** The username or password is incorrect.
**Solution:**
1. Verify that the username and password are correct in the data source configuration.
1. Check that the user exists in PostgreSQL.
1. Verify the password has not expired.
1. If no password is specified, ensure a [PostgreSQL password file](https://www.postgresql.org/docs/current/static/libpq-pgpass.html) is configured.
### Permission denied
**Error message:** "pq: permission denied for table" or "pq: permission denied for schema"
**Cause:** The database user does not have permission to access the requested table or schema.
**Solution:**
1. Verify the user has `SELECT` permissions on the required tables.
1. Grant the necessary permissions:
```sql
GRANT USAGE ON SCHEMA schema_name TO grafanareader;
GRANT SELECT ON schema_name.table_name TO grafanareader;
```
1. Check that the user has access to the correct database.
1. Verify the search path includes the schema containing your tables.
### No pg_hba.conf entry
**Error message:** "pq: no pg_hba.conf entry for host"
**Cause:** PostgreSQL is not configured to accept connections from the Grafana server.
**Solution:**
1. Edit the `pg_hba.conf` file on the PostgreSQL server.
1. Add an entry to allow connections from the Grafana server:
```text
host database_name username grafana_ip/32 md5
```
1. Reload PostgreSQL configuration: `SELECT pg_reload_conf();`
1. If using SSL, ensure the correct authentication method is specified (for example, `hostssl` instead of `host`).
## TLS and certificate errors
The following errors occur when there are issues with TLS configuration.
### Certificate verification failed
**Error message:** "x509: certificate signed by unknown authority" or "certificate verify failed"
**Cause:** Grafana cannot verify the TLS certificate presented by PostgreSQL.
**Solution:**
1. Set the **TLS/SSL Mode** to the appropriate level (`require`, `verify-ca`, or `verify-full`).
1. If using a self-signed certificate, add the CA certificate in **TLS/SSL Auth Details**.
1. Verify the certificate chain is complete and valid.
1. Ensure the certificate has not expired.
1. For testing only, set **TLS/SSL Mode** to `disable` (not recommended for production).
### SSL not supported
**Error message:** "pq: SSL is not enabled on the server" or "server does not support SSL"
**Cause:** The PostgreSQL server is not configured for SSL connections, but the data source requires SSL.
**Solution:**
1. Set **TLS/SSL Mode** to `disable` if SSL is not required.
1. Alternatively, enable SSL on the PostgreSQL server by configuring `ssl = on` in `postgresql.conf`.
1. Ensure the server has valid SSL certificates configured.
### Client certificate error
**Error message:** "TLS: failed to find any PEM data in certificate input" or "could not load client certificate"
**Cause:** The client certificate or key is invalid or incorrectly formatted.
**Solution:**
1. Verify the certificate and key are in PEM format.
1. Ensure the certificate file path is correct and readable by the Grafana process.
1. Check that the certificate and key match (belong to the same key pair).
1. If using certificate content, ensure you've pasted the complete certificate including headers.
## Database errors
The following errors occur when there are issues with the database configuration.
### Database does not exist
**Error message:** "pq: database 'database_name' does not exist"
**Cause:** The specified database name is incorrect or the database doesn't exist.
**Solution:**
1. Verify the database name in the data source configuration.
1. Check that the database exists: `\l` in psql or `SELECT datname FROM pg_database;`
1. Note that database names are case-sensitive in PostgreSQL; ensure the name matches exactly.
1. Verify the user has permission to connect to the database.
### Relation does not exist
**Error message:** "pq: relation 'table_name' does not exist"
**Cause:** The specified table or view does not exist, or the user cannot access it.
**Solution:**
1. Verify the table name is correct and exists in the database.
1. Check the schema name if the table is not in the public schema.
1. Use fully qualified names: `schema_name.table_name`.
1. Verify the user has `SELECT` permission on the table.
1. Check the search path: `SHOW search_path;`
## Query errors
The following errors occur when there are issues with SQL syntax or query execution.
### Query syntax error
**Error message:** "pq: syntax error at or near" or "ERROR: syntax error"
**Cause:** The SQL query contains invalid syntax.
**Solution:**
1. Check your query syntax for typos or invalid keywords.
1. Verify column and table names are correctly quoted if they contain special characters or are reserved words.
1. Use double quotes for identifiers: `"column_name"`.
1. Test the query directly in a PostgreSQL client (psql, pgAdmin).
### Column does not exist
**Error message:** "pq: column 'column_name' does not exist"
**Cause:** The specified column name is incorrect or doesn't exist in the table.
**Solution:**
1. Verify the column name is spelled correctly.
1. Note that quoted column names are case-sensitive in PostgreSQL, so the case must match the table definition exactly.
1. Use the correct quoting for column names: `"Column_Name"` for case-sensitive names.
1. Verify the column exists in the table: `\d table_name` in psql.
### No time column found
**Error message:** "no time column found" or time series visualization shows no data
**Cause:** The query result does not include a properly formatted time column.
**Solution:**
1. Ensure your query includes a column named `time` that returns a timestamp or epoch value.
1. Use an alias to rename your time column: `SELECT created_at AS time`.
1. Ensure the time column is of type `timestamp`, `timestamptz`, or a numeric epoch value.
1. Order results by the time column: `ORDER BY time ASC`.
### Macro expansion error
**Error message:** "macro '$__timeFilter' not found" or incorrect query results with macros
**Cause:** Grafana macros are not being properly expanded.
**Solution:**
1. Verify the macro syntax is correct, for example `$__timeFilter(time_column)`.
1. Ensure the column name passed to the macro exists in your table.
1. Use the **Preview** toggle in Builder mode to see the expanded query.
1. For time-based macros, ensure the column contains timestamp data.
## Performance issues
The following issues relate to slow query execution or resource constraints.
### Query timeout
**Error message:** "canceling statement due to statement timeout" or "query timeout"
**Cause:** The query took longer than the configured timeout.
**Solution:**
1. Reduce the time range of your query.
1. Add indexes to columns used in WHERE clauses and joins.
1. Use the `$__timeFilter` macro to limit data to the dashboard time range.
1. Increase the statement timeout in PostgreSQL if you have admin access.
1. Optimize your query to reduce complexity.
### Too many connections
**Error message:** "pq: sorry, too many clients already" or "connection pool exhausted"
**Cause:** The maximum number of connections to PostgreSQL has been reached.
**Solution:**
1. Reduce the **Max open** connections setting in the data source configuration.
1. Increase `max_connections` in PostgreSQL's `postgresql.conf` if you have admin access.
1. Check for connection leaks in other applications connecting to the same database.
1. Enable **Auto max idle** to automatically manage idle connections.
### Slow query performance
**Cause:** Queries take a long time to execute.
**Solution:**
1. Reduce the time range of your query.
1. Add appropriate indexes to your tables.
1. Use the `$__timeFilter` macro to limit the data scanned.
1. Increase the **Min time interval** setting to reduce the number of data points.
1. Use `EXPLAIN ANALYZE` in PostgreSQL to identify query bottlenecks.
1. Consider using materialized views for complex aggregations.
## Provisioning errors
The following errors occur when provisioning the data source via YAML.
### Invalid provisioning configuration
**Error message:** "metric request error" or data source test fails after provisioning
**Cause:** The provisioning YAML file contains incorrect configuration.
**Solution:**
1. Ensure parameter names match the expected format exactly.
1. Verify the database name is **not** included in the URL.
1. Use the correct format for the URL: `hostname:port`.
1. Check that string values are properly quoted in the YAML file.
1. Refer to the [provisioning example](../configure/#provision-the-data-source) for the correct format.
Example correct configuration:
```yaml
datasources:
- name: Postgres
type: postgres
url: localhost:5432
user: grafana
secureJsonData:
password: 'Password!'
jsonData:
database: grafana
sslmode: 'disable'
```
## Other common issues
The following issues don't produce specific error messages but are commonly encountered.
### Empty query results
**Cause:** The query returns no data.
**Solution:**
1. Verify the time range includes data in your database.
1. Check that table and column names are correct.
1. Test the query directly in PostgreSQL.
1. Ensure filters are not excluding all data.
1. Verify the `$__timeFilter` macro is using the correct time column.
### TimescaleDB functions not available
**Cause:** TimescaleDB-specific functions like `time_bucket` are not available in the query builder.
**Solution:**
1. Enable the **TimescaleDB** toggle in the data source configuration under **PostgreSQL Options**.
1. Verify TimescaleDB is installed and enabled in your PostgreSQL database.
1. Check that the `timescaledb` extension is created: `CREATE EXTENSION IF NOT EXISTS timescaledb;`
### Data appears delayed or missing recent points
**Cause:** The visualization doesn't show the most recent data.
**Solution:**
1. Check the dashboard time range and refresh settings.
1. Verify the **Min time interval** is not set too high.
1. Ensure data has been committed to the database (not in an uncommitted transaction).
1. Check for clock synchronization issues between Grafana and PostgreSQL.
## Get additional help
If you continue to experience issues after following this troubleshooting guide:
1. Check the [PostgreSQL documentation](https://www.postgresql.org/docs/) for database-specific guidance.
1. Review the [Grafana community forums](https://community.grafana.com/) for similar issues.
1. Contact Grafana Support if you are a Cloud Pro, Cloud Contracted, or Enterprise user.
1. When reporting issues, include:
- Grafana version
- PostgreSQL version
- Error messages (redact sensitive information)
- Steps to reproduce
- Relevant configuration such as data source settings, TLS mode, and connection limits (redact passwords and other credentials)

View File

@@ -119,7 +119,14 @@ describe('Get y range', () => {
values: [2, 1.999999999999999, 2.000000000000001, 2, 2],
type: FieldType.number,
config: {},
state: { range: { min: 1.999999999999999, max: 2.000000000000001, delta: 0 } },
state: { range: { min: 1.9999999999999999999, max: 2.000000000000000001, delta: 0 } },
};
const decimalsNotCloseYField: Field = {
name: 'y',
values: [2, 0.0094, 0.0053, 0.0078, 0.0061],
type: FieldType.number,
config: {},
state: { range: { min: 0.0053, max: 0.0094, delta: 0.0041 } },
};
const xField: Field = {
name: 'x',
@@ -183,6 +190,11 @@ describe('Get y range', () => {
field: decimalsCloseYField,
expected: [2, 4],
},
{
description: 'decimal values which are not close to equal should not be rounded out',
field: decimalsNotCloseYField,
expected: [0.0053, 0.0094],
},
])(`should return correct range for $description`, ({ field, expected }) => {
const actual = getYRange(getAlignedFrame(field));
expect(actual).toEqual(expected);

View File

@@ -8,6 +8,7 @@ import {
FieldType,
getFieldColorModeForField,
GrafanaTheme2,
guessDecimals,
isLikelyAscendingVector,
nullToValue,
roundDecimals,
@@ -76,8 +77,6 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
min = Math.min(min!, field.config.min ?? Infinity);
max = Math.max(max!, field.config.max ?? -Infinity);
// console.log({ min, max });
// if noValue is set, ensure that it is included in the range as well
const noValue = +field.config?.noValue!;
if (!Number.isNaN(noValue)) {
@@ -85,9 +84,11 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
max = Math.max(max, noValue);
}
const decimals = field.config.decimals ?? Math.max(guessDecimals(min), guessDecimals(max));
// call roundDecimals to mirror what is going to eventually happen in uplot
let roundedMin = roundDecimals(min, field.config.decimals ?? 0);
let roundedMax = roundDecimals(max, field.config.decimals ?? 0);
let roundedMin = roundDecimals(min, decimals);
let roundedMax = roundDecimals(max, decimals);
// if the rounded min and max are different,
// we can return the real min and max.
@@ -102,11 +103,9 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
roundedMax = 1;
} else if (roundedMin < 0) {
// both are negative
// max = 0;
roundedMin *= 2;
} else {
// both are positive
// min = 0;
roundedMax *= 2;
}

View File

@@ -77,6 +77,10 @@ var (
"user.sync.user-externalUID-mismatch",
errutil.WithPublicMessage("User externalUID mismatch"),
)
errSCIMAuthModuleMismatch = errutil.Unauthorized(
"user.sync.scim-auth-module-mismatch",
errutil.WithPublicMessage("User was provisioned via SCIM and must login via SAML"),
)
)
var (
@@ -308,6 +312,21 @@ func (s *UserSync) SyncUserHook(ctx context.Context, id *authn.Identity, _ *auth
// just try to fetch the user one more to make the other request work.
if errors.Is(err, user.ErrUserAlreadyExists) {
usr, _, err = s.getUser(ctx, id)
// Check if this is a SCIM-provisioned user trying to login via an auth module that is not SAML or GCOM
if err == nil && usr != nil && usr.IsProvisioned && id.AuthenticatedBy != login.GrafanaComAuthModule {
_, authErr := s.authInfoService.GetAuthInfo(ctx, &login.GetAuthInfoQuery{
UserId: usr.ID,
AuthModule: id.AuthenticatedBy,
})
if errors.Is(authErr, user.ErrUserNotFound) {
s.log.FromContext(ctx).Error("SCIM-provisioned user attempted login via non-SAML auth module",
"user_id", usr.ID,
"attempted_module", id.AuthenticatedBy,
)
return errSCIMAuthModuleMismatch.Errorf("user was provisioned via SCIM but attempted login via %s", id.AuthenticatedBy)
}
}
}
if err != nil {

View File

@@ -1926,3 +1926,100 @@ func TestUserSync_SCIMLoginUsageStatSet(t *testing.T) {
finalCount := finalStats["stats.features.scim.has_successful_login.count"].(int)
require.Equal(t, int(1), finalCount)
}
// TestUserSync_SyncUserHook_SCIMAuthModuleMismatch verifies that a SCIM-provisioned
// user (IsProvisioned: true) who attempts to log in via a non-GCOM auth module
// ("oauth_azuread" here), and who has no auth-info record for that module, is
// rejected with errSCIMAuthModuleMismatch.
func TestUserSync_SyncUserHook_SCIMAuthModuleMismatch(t *testing.T) {
userSrv := usertest.NewMockService(t)
authInfoSrv := authinfotest.NewMockAuthInfoService(t)
// Simulate a create race: the initial lookup misses, Create reports the user
// already exists, and the retry fetch returns the SCIM-provisioned user.
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(nil, user.ErrUserNotFound).Once()
userSrv.On("Create", mock.Anything, mock.Anything).Return(nil, user.ErrUserAlreadyExists).Once()
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(&user.User{
ID: 1,
Email: "test@test.com",
IsProvisioned: true,
}, nil).Once()
// No auth-info entry exists for the attempted module; this is what should
// trigger the SCIM auth-module mismatch error.
authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
return q.AuthModule == "oauth_azuread"
})).Return(nil, user.ErrUserNotFound).Once()
s := ProvideUserSync(
userSrv,
authinfoimpl.ProvideOSSUserProtectionService(),
authInfoSrv,
&quotatest.FakeQuotaService{},
tracing.NewNoopTracerService(),
featuremgmt.WithFeatures(),
setting.NewCfg(),
nil,
)
email := "test@test.com"
err := s.SyncUserHook(context.Background(), &authn.Identity{
AuthenticatedBy: "oauth_azuread",
ClientParams: authn.ClientParams{
SyncUser: true,
AllowSignUp: true,
LookUpParams: login.UserLookupParams{
Email: &email,
},
},
}, nil)
// The error must be the typed mismatch error, and its message must name both
// SCIM and the auth module that was attempted.
require.Error(t, err)
assert.ErrorIs(t, err, errSCIMAuthModuleMismatch)
assert.Contains(t, err.Error(), "SCIM")
assert.Contains(t, err.Error(), "oauth_azuread")
}
// TestUserSync_SyncUserHook_SCIMUserAllowsGCOMLogin verifies the carve-out for
// Grafana.com logins: a SCIM-provisioned user authenticating via
// login.GrafanaComAuthModule must NOT be rejected, even when no auth-info
// record exists for that module.
func TestUserSync_SyncUserHook_SCIMUserAllowsGCOMLogin(t *testing.T) {
userSrv := usertest.NewMockService(t)
authInfoSrv := authinfotest.NewMockAuthInfoService(t)
authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
return q.AuthModule == login.GrafanaComAuthModule && q.AuthId == "gcom-user-123"
})).Return(nil, user.ErrUserNotFound).Once()
// Same create-race setup as the mismatch test: lookup misses, Create returns
// ErrUserAlreadyExists, retry fetch returns the provisioned user.
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(nil, user.ErrUserNotFound).Once()
userSrv.On("Create", mock.Anything, mock.Anything).Return(nil, user.ErrUserAlreadyExists).Once()
authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
return q.AuthModule == login.GrafanaComAuthModule && q.AuthId == "gcom-user-123"
})).Return(nil, user.ErrUserNotFound).Once()
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(&user.User{
ID: 1,
Email: "test@test.com",
IsProvisioned: true,
}, nil).Once()
s := ProvideUserSync(
userSrv,
authinfoimpl.ProvideOSSUserProtectionService(),
authInfoSrv,
&quotatest.FakeQuotaService{},
tracing.NewNoopTracerService(),
featuremgmt.WithFeatures(),
setting.NewCfg(),
nil,
)
email := "test@test.com"
err := s.SyncUserHook(context.Background(), &authn.Identity{
AuthenticatedBy: login.GrafanaComAuthModule,
AuthID: "gcom-user-123",
ClientParams: authn.ClientParams{
SyncUser: true,
AllowSignUp: true,
LookUpParams: login.UserLookupParams{
Email: &email,
},
},
}, nil)
// GCOM login path must succeed for SCIM-provisioned users.
require.NoError(t, err)
}

View File

@@ -4,8 +4,12 @@ import (
"google.golang.org/protobuf/types/known/structpb"
authzv1 "github.com/grafana/authlib/authz/proto/v1"
dashboardV1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
folders "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
iamv0alpha1 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/services/accesscontrol"
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
)
@@ -44,7 +48,8 @@ func getTypeInfo(group, resource string) (typeInfo, bool) {
func NewResourceInfoFromCheck(r *authzv1.CheckRequest) ResourceInfo {
typ, relations := getTypeAndRelations(r.GetGroup(), r.GetResource())
return newResource(
resource := newResource(
typ,
r.GetGroup(),
r.GetResource(),
@@ -53,6 +58,19 @@ func NewResourceInfoFromCheck(r *authzv1.CheckRequest) ResourceInfo {
r.GetSubresource(),
relations,
)
// Special case for creating folders and resources in the root folder
if r.GetVerb() == utils.VerbCreate {
if resource.IsFolderResource() && resource.name == "" {
resource.name = accesscontrol.GeneralFolderUID
} else if resource.HasFolderSupport() && resource.folder == "" {
resource.folder = accesscontrol.GeneralFolderUID
}
return resource
}
return resource
}
func NewResourceInfoFromBatchItem(i *authzextv1.BatchCheckItem) ResourceInfo {
@@ -164,3 +182,15 @@ func (r ResourceInfo) IsValidRelation(relation string) bool {
func (r ResourceInfo) HasSubresource() bool {
return r.subresource != ""
}
// resourcesWithFolderSupport lists the API groups whose resources can be
// placed inside folders; currently only the dashboard group.
var resourcesWithFolderSupport = map[string]bool{
dashboardV1.DashboardResourceInfo.GroupResource().Group: true,
}
// HasFolderSupport reports whether this resource's API group supports
// folder placement (missing keys default to false).
func (r ResourceInfo) HasFolderSupport() bool {
return resourcesWithFolderSupport[r.group]
}
// IsFolderResource reports whether the resource belongs to the folder
// API group itself (i.e. the resource *is* a folder).
func (r ResourceInfo) IsFolderResource() bool {
return r.group == folders.FolderResourceInfo.GroupResource().Group
}

View File

@@ -228,6 +228,9 @@ func TranslateToResourceTuple(subject string, action, kind, name string) (*openf
}
if name == "*" {
if m.group != "" && m.resource != "" {
return NewGroupResourceTuple(subject, m.relation, m.group, m.resource, m.subresource), true
}
return NewGroupResourceTuple(subject, m.relation, translation.group, translation.resource, m.subresource), true
}

View File

@@ -0,0 +1,89 @@
package common
import (
"testing"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/types/known/structpb"
)
// translationTestCase describes one TranslateToResourceTuple input
// (subject/action/kind/name) and the OpenFGA tuple key it should produce.
type translationTestCase struct {
testName string
subject string
action string
kind string
name string
expected *openfgav1.TupleKey
}
// TestTranslateToResourceTuple checks that legacy RBAC action/kind/name
// triples translate to the expected OpenFGA tuple keys: wildcard names map
// to group_resource objects, and the "general" folder maps to a folder
// object carrying a subresource_filter condition.
func TestTranslateToResourceTuple(t *testing.T) {
tests := []translationTestCase{
{
testName: "dashboards:read in folders",
subject: "user:1",
action: "dashboards:read",
kind: "folders",
name: "*",
expected: &openfgav1.TupleKey{
User: "user:1",
Relation: "get",
Object: "group_resource:dashboard.grafana.app/dashboards",
},
},
{
testName: "dashboards:read for all dashboards",
subject: "user:1",
action: "dashboards:read",
kind: "dashboards",
name: "*",
expected: &openfgav1.TupleKey{
User: "user:1",
Relation: "get",
Object: "group_resource:dashboard.grafana.app/dashboards",
},
},
{
testName: "dashboards:read for general folder",
subject: "user:1",
action: "dashboards:read",
kind: "folders",
name: "general",
expected: &openfgav1.TupleKey{
User: "user:1",
Relation: "resource_get",
Object: "folder:general",
// The condition restricts the folder relation to the
// dashboard subresource only.
Condition: &openfgav1.RelationshipCondition{
Name: "subresource_filter",
Context: &structpb.Struct{
Fields: map[string]*structpb.Value{
"subresources": structpb.NewListValue(&structpb.ListValue{
Values: []*structpb.Value{structpb.NewStringValue("dashboard.grafana.app/dashboards")},
}),
},
},
},
},
},
{
testName: "folders:read",
subject: "user:1",
action: "folders:read",
kind: "folders",
name: "*",
expected: &openfgav1.TupleKey{
User: "user:1",
Relation: "get",
Object: "group_resource:folder.grafana.app/folders",
},
},
}
for _, test := range tests {
t.Run(test.testName, func(t *testing.T) {
tuple, ok := TranslateToResourceTuple(test.subject, test.action, test.kind, test.name)
require.True(t, ok)
// Compare exported fields only; proto-internal state is ignored.
require.EqualExportedValues(t, test.expected, tuple)
})
}
}

View File

@@ -212,4 +212,16 @@ func testCheck(t *testing.T, server *Server) {
require.NoError(t, err)
assert.True(t, res.GetAllowed(), "user should be able to view dashboards in folder 6")
})
t.Run("user:18 should be able to create folder in root folder", func(t *testing.T) {
res, err := server.Check(newContextWithNamespace(), newReq("user:18", utils.VerbCreate, folderGroup, folderResource, "", "", ""))
require.NoError(t, err)
assert.Equal(t, true, res.GetAllowed())
})
t.Run("user:18 should be able to create dashboard in root folder", func(t *testing.T) {
res, err := server.Check(newContextWithNamespace(), newReq("user:18", utils.VerbCreate, dashboardGroup, dashboardResource, "", "", ""))
require.NoError(t, err)
assert.Equal(t, true, res.GetAllowed())
})
}

View File

@@ -71,6 +71,8 @@ func setup(t *testing.T, srv *Server) *Server {
common.NewTypedResourceTuple("user:15", common.RelationGet, common.TypeUser, userGroup, userResource, statusSubresource, "1"),
common.NewTypedResourceTuple("user:16", common.RelationGet, common.TypeServiceAccount, serviceAccountGroup, serviceAccountResource, statusSubresource, "1"),
common.NewFolderTuple("user:17", common.RelationSetView, "4"),
common.NewFolderTuple("user:18", common.RelationCreate, "general"),
common.NewFolderResourceTuple("user:18", common.RelationCreate, dashboardGroup, dashboardResource, "", "general"),
}
return setupOpenFGADatabase(t, srv, tuples)

View File

@@ -304,8 +304,15 @@ type DeleteDashboardCommand struct {
RemovePermissions bool
}
type ProvisioningConfig struct {
Name string
OrgID int64
Folder string
AllowUIUpdates bool
}
type DeleteOrphanedProvisionedDashboardsCommand struct {
ReaderNames []string
Config []ProvisioningConfig
}
type DashboardProvisioningSearchResults struct {
@@ -405,6 +412,8 @@ type DashboardSearchProjection struct {
FolderTitle string
SortMeta int64
Tags []string
ManagedBy utils.ManagerKind
ManagerId string
Deleted *time.Time
}

View File

@@ -877,24 +877,32 @@ func (dr *DashboardServiceImpl) waitForSearchQuery(ctx context.Context, query *d
}
func (dr *DashboardServiceImpl) DeleteOrphanedProvisionedDashboards(ctx context.Context, cmd *dashboards.DeleteOrphanedProvisionedDashboardsCommand) error {
// cleanup duplicate provisioned dashboards first (this will have the same name and external_id)
// note: only works in modes 1-3
if err := dr.DeleteDuplicateProvisionedDashboards(ctx); err != nil {
dr.log.Error("Failed to delete duplicate provisioned dashboards", "error", err)
}
// check each org for orphaned provisioned dashboards
orgs, err := dr.orgService.Search(ctx, &org.SearchOrgsQuery{})
if err != nil {
return err
}
orgIDs := make([]int64, 0, len(orgs))
for _, org := range orgs {
orgIDs = append(orgIDs, org.ID)
}
if err := dr.DeleteDuplicateProvisionedDashboards(ctx, orgIDs, cmd.Config); err != nil {
dr.log.Error("Failed to delete duplicate provisioned dashboards", "error", err)
}
currentNames := make([]string, 0, len(cmd.Config))
for _, cfg := range cmd.Config {
currentNames = append(currentNames, cfg.Name)
}
for _, org := range orgs {
ctx, _ := identity.WithServiceIdentity(ctx, org.ID)
// find all dashboards in the org that have a file repo set that is not in the given readers list
foundDashs, err := dr.searchProvisionedDashboardsThroughK8s(ctx, &dashboards.FindPersistedDashboardsQuery{
ManagedBy: utils.ManagerKindClassicFP, //nolint:staticcheck
ManagerIdentityNotIn: cmd.ReaderNames,
ManagerIdentityNotIn: currentNames,
OrgId: org.ID,
})
if err != nil {
@@ -921,7 +929,129 @@ func (dr *DashboardServiceImpl) DeleteOrphanedProvisionedDashboards(ctx context.
return nil
}
func (dr *DashboardServiceImpl) DeleteDuplicateProvisionedDashboards(ctx context.Context) error {
// searchExistingProvisionedData fetches provisioned data for the purposes of
// duplication cleanup. Returns the set of folder UIDs for folders with the
// given title, and the set of resources contained in those folders.
//
// Only top-level folders (ParentUID == "") are considered; provisioned folders
// that were moved elsewhere in the hierarchy are intentionally excluded.
// Returns (nil, nil, nil) when no matching top-level folder exists.
func (dr *DashboardServiceImpl) searchExistingProvisionedData(
ctx context.Context, orgID int64, folderTitle string,
) ([]string, []dashboards.DashboardSearchProjection, error) {
// Run all lookups under the service identity for this org.
ctx, user := identity.WithServiceIdentity(ctx, orgID)
cmd := folder.SearchFoldersQuery{
OrgID: orgID,
SignedInUser: user,
Title: folderTitle,
// Exact title match — we are looking for the specific provisioned folder.
TitleExactMatch: true,
}
searchResults, err := dr.folderService.SearchFolders(ctx, cmd)
if err != nil {
return nil, nil, fmt.Errorf("checking if provisioning reset is required: %w", err)
}
var matchingFolders []string //nolint:prealloc
for _, result := range searchResults {
// Fetch the full folder to inspect its ParentUID (search results alone
// are not sufficient to determine hierarchy position).
f, err := dr.folderService.Get(ctx, &folder.GetFolderQuery{
OrgID: orgID,
UID: &result.UID,
SignedInUser: user,
})
if err != nil {
return nil, nil, err
}
// We are only interested in folders at the top-level of the folder hierarchy.
// Cleanup is not performed for provisioned folders that were moved to
// a different location.
if f.ParentUID != "" {
continue
}
matchingFolders = append(matchingFolders, f.UID)
}
if len(matchingFolders) == 0 {
// If there are no folders with the same title as the provisioned folder we
// are looking for, there is nothing to be cleaned up.
return nil, nil, nil
}
// Collect every dashboard living inside the matching folders.
resources, err := dr.FindDashboards(ctx, &dashboards.FindPersistedDashboardsQuery{
OrgId: orgID,
SignedInUser: user,
FolderUIDs: matchingFolders,
})
if err != nil {
return nil, nil, err
}
return matchingFolders, resources, nil
}
// maybeResetProvisioning will check for duplicated provisioned dashboards in the database. These duplications
// happen when multiple provisioned dashboards of the same title are found, or multiple provisioned
// folders are found. In this case, provisioned resources are deleted, allowing the provisioning
// process to start from scratch after this function returns.
//
// Best-effort: failures are logged per org and never returned to the caller.
// Eligibility is gated by canBeAutomaticallyCleanedUp; assumes all configs
// share the folder title taken from configs[0] — TODO confirm with callers.
func (dr *DashboardServiceImpl) maybeResetProvisioning(ctx context.Context, orgs []int64, configs []dashboards.ProvisioningConfig) {
// Bail out entirely when the instance is not eligible for automated cleanup.
if skipReason := canBeAutomaticallyCleanedUp(configs); skipReason != "" {
dr.log.Info("not eligible for automated cleanup", "reason", skipReason)
return
}
folderTitle := configs[0].Folder
// Set of provisioning reader names, used to decide which resources are "ours".
provisionedNames := map[string]bool{}
for _, c := range configs {
provisionedNames[c.Name] = true
}
for _, orgID := range orgs {
ctx, user := identity.WithServiceIdentity(ctx, orgID)
provFolders, resources, err := dr.searchExistingProvisionedData(ctx, orgID, folderTitle)
if err != nil {
dr.log.Error("failed to search for provisioned data for cleanup", "org", orgID, "error", err)
continue
}
// cleanupSteps decides which folders/dashboards must be deleted; an error
// here means cleanup is not safely possible for this org, so skip it.
steps, err := cleanupSteps(provFolders, resources, provisionedNames)
if err != nil {
dr.log.Warn("not possible to perform automated duplicate cleanup", "org", orgID, "error", err)
continue
}
for _, step := range steps {
var err error
switch step.Type {
case searchstore.TypeDashboard:
err = dr.deleteDashboard(ctx, 0, step.UID, orgID, false)
case searchstore.TypeFolder:
err = dr.folderService.Delete(ctx, &folder.DeleteFolderCommand{
OrgID: orgID,
SignedInUser: user,
UID: step.UID,
})
}
// Log each deletion outcome individually; one failure does not stop
// the remaining steps.
if err == nil {
dr.log.Info("deleted duplicated provisioned resource",
"type", step.Type, "uid", step.UID,
)
} else {
dr.log.Error("failed to delete duplicated provisioned resource",
"type", step.Type, "uid", step.UID, "error", err,
)
}
}
}
}
func (dr *DashboardServiceImpl) DeleteDuplicateProvisionedDashboards(ctx context.Context, orgs []int64, configs []dashboards.ProvisioningConfig) error {
// Start from scratch if duplications that cannot be fixed by the logic
// below are found in the database.
dr.maybeResetProvisioning(ctx, orgs, configs)
// cleanup duplicate provisioned dashboards (i.e., with the same name and external_id).
// Note: only works in modes 1-3. This logic can be removed once mode5 is
// enabled everywhere.
duplicates, err := dr.dashboardStore.GetDuplicateProvisionedDashboards(ctx)
if err != nil {
return err
@@ -1511,6 +1641,8 @@ func (dr *DashboardServiceImpl) FindDashboards(ctx context.Context, query *dashb
FolderTitle: folderTitle,
FolderID: folderID,
FolderSlug: slugify.Slugify(folderTitle),
ManagedBy: hit.ManagedBy.Kind,
ManagerId: hit.ManagedBy.ID,
Tags: hit.Tags,
}

View File

@@ -779,7 +779,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
}, nil).Twice()
err := service.DeleteOrphanedProvisionedDashboards(context.Background(), &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
ReaderNames: []string{"test"},
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
})
require.NoError(t, err)
k8sCliMock.AssertExpectations(t)
@@ -874,7 +874,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
}, nil).Once()
err := singleOrgService.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
ReaderNames: []string{"test"},
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
})
require.NoError(t, err)
k8sCliMock.AssertExpectations(t)
@@ -906,7 +906,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
}, nil)
err := singleOrgService.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
ReaderNames: []string{"test"},
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
})
require.NoError(t, err)
k8sCliMock.AssertExpectations(t)

View File

@@ -0,0 +1,107 @@
package service
import (
"errors"
"fmt"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/sqlstore/searchstore"
)
// canBeAutomaticallyCleanedUp determines whether this instance can be automatically cleaned up
// if duplicated provisioned resources are found. To ensure the process does not delete
// resources it shouldn't, automatic cleanups only happen if all provisioned dashboards
// are stored in the same folder (by title), and no dashboards allow UI updates.
// An empty return value means cleanup is allowed; otherwise the string states
// the reason it must be skipped.
func canBeAutomaticallyCleanedUp(configs []dashboards.ProvisioningConfig) string {
	if len(configs) == 0 {
		return "no provisioned dashboards"
	}
	// The first config's folder is the reference every other config must match.
	referenceFolder := configs[0].Folder
	if referenceFolder == "" {
		return fmt.Sprintf("dashboard has no folder: %s", configs[0].Name)
	}
	for _, cfg := range configs {
		switch {
		case cfg.AllowUIUpdates:
			return "contains dashboards with allowUiUpdates"
		case cfg.Folder != referenceFolder:
			return "dashboards provisioned across multiple folders"
		}
	}
	return ""
}
// deleteProvisionedResource is a single deletion step produced by cleanupSteps.
type deleteProvisionedResource struct {
	// Type is either searchstore.TypeDashboard or searchstore.TypeFolder.
	Type string
	// UID identifies the resource to delete.
	UID string
}
// cleanupSteps computes the sequence of steps to be performed in order to cleanup the
// provisioning resources and allow the process to start from scratch when duplication
// is detected. The sequence of steps will dictate the order in which dashboards and folders
// are to be deleted: dashboard steps always precede folder steps.
func cleanupSteps(provFolders []string, resources []dashboards.DashboardSearchProjection, configDashboards map[string]bool) ([]deleteProvisionedResource, error) {
	var (
		duplicateFound  bool                // a provisioned name appeared more than once
		foreignResource bool                // something not owned by classic file provisioning (or any folder)
		seen            = map[string]struct{}{}
	)
	var dashboardSteps []deleteProvisionedResource //nolint:prealloc
	for _, res := range resources {
		// nolint:staticcheck
		if res.IsFolder || res.ManagedBy != utils.ManagerKindClassicFP {
			foreignResource = true
			continue
		}
		// Only dashboards included in this instance's provisioning
		// configuration are eligible for deletion.
		if !configDashboards[res.ManagerId] {
			continue
		}
		if _, ok := seen[res.ManagerId]; ok {
			duplicateFound = true
		}
		seen[res.ManagerId] = struct{}{}
		dashboardSteps = append(dashboardSteps, deleteProvisionedResource{
			Type: searchstore.TypeDashboard,
			UID:  res.UID,
		})
	}
	switch {
	case len(provFolders) == 0:
		// No provisioned folders: nothing to clean up.
		return nil, nil
	case len(provFolders) == 1:
		// A single folder is kept as-is; delete the provisioned dashboards
		// only when duplication was actually detected.
		if duplicateFound {
			return dashboardSteps, nil
		}
		return nil, nil
	case foreignResource:
		// Multiple folders with at least one user-created resource: bail,
		// since we could not safely delete any of the duplicated folders.
		return nil, errors.New("multiple provisioning folders exist with at least one user-created resource")
	default:
		// Multiple folders, everything provisioned: delete the dashboards
		// first, then the folders themselves.
		steps := dashboardSteps
		for _, folderUID := range provFolders {
			steps = append(steps, deleteProvisionedResource{
				Type: searchstore.TypeFolder,
				UID:  folderUID,
			})
		}
		return steps, nil
	}
}

View File

@@ -0,0 +1,279 @@
package service
import (
"testing"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/sqlstore/searchstore"
"github.com/stretchr/testify/require"
)
// Test_canBeAutomaticallyCleanedUp verifies the eligibility rules for the
// automated duplicate cleanup: a non-empty skip reason is returned when the
// configs are empty, lack a folder, span multiple folders, or allow UI updates.
func Test_canBeAutomaticallyCleanedUp(t *testing.T) {
	testCases := []struct {
		name         string
		configs      []dashboards.ProvisioningConfig
		expectedSkip string // expected skip reason; empty means cleanup is allowed
	}{
		{
			name:         "no dashboards defined in the configuration",
			configs:      []dashboards.ProvisioningConfig{},
			expectedSkip: "no provisioned dashboards",
		},
		{
			// An empty folder on the *first* config is reported specially.
			name: "first defined dashboard has no folder defined",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: ""},
				{Folder: "f1"},
			},
			expectedSkip: "dashboard has no folder: 1",
		},
		{
			// An empty folder on a later config surfaces as the
			// multiple-folders reason, since it differs from the first one.
			name: "one of the provisioned dashboards has no folder defined",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1"},
				{Name: "3", Folder: ""},
				{Name: "4", Folder: "f1"},
			},
			expectedSkip: "dashboards provisioned across multiple folders",
		},
		{
			name: "one of the provisioned dashboards allows UI updates",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1", AllowUIUpdates: true},
				{Name: "3", Folder: "f1"},
				{Name: "4", Folder: "f1"},
			},
			expectedSkip: "contains dashboards with allowUiUpdates",
		},
		{
			name: "one of the provisioned dashboards is in a different folder",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1"},
				{Name: "3", Folder: "f1"},
				{Name: "4", Folder: "different"},
			},
			expectedSkip: "dashboards provisioned across multiple folders",
		},
		{
			// Happy path: same folder everywhere, no UI updates allowed.
			name: "can be skipped when all conditions are met",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1"},
				{Name: "3", Folder: "f1"},
				{Name: "4", Folder: "f1"},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.expectedSkip, canBeAutomaticallyCleanedUp(tc.configs))
		})
	}
}
// Test_cleanupSteps verifies the deletion plan produced for the various
// combinations of provisioned folders, provisioned/user-created resources,
// and configured dashboard names.
func Test_cleanupSteps(t *testing.T) {
	isDashboard, isFolder := false, true
	// fromUser builds a search projection with no manager set, i.e. a
	// user-created resource.
	fromUser := func(uid, name string, isFolder bool) dashboards.DashboardSearchProjection {
		return dashboards.DashboardSearchProjection{
			UID:       uid,
			ManagerId: name,
			IsFolder:  isFolder,
		}
	}
	// provisioned builds a resource managed by classic file provisioning.
	provisioned := func(uid, name string, isFolder bool) dashboards.DashboardSearchProjection {
		dashboard := fromUser(uid, name, isFolder)
		dashboard.ManagedBy = utils.ManagerKindClassicFP //nolint:staticcheck
		return dashboard
	}
	testCases := []struct {
		name                 string
		provisionedFolders   []string
		provisionedResources []dashboards.DashboardSearchProjection
		configDashboards     []string // reader names present in the provisioning config
		expectedSteps        []deleteProvisionedResource
		expectedErr          string
	}{
		{
			name:               "no provisioned folders, nothing to do",
			provisionedFolders: []string{},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
			},
		},
		{
			name:               "multiple folders, a user-created dashboard in one of them",
			provisionedFolders: []string{"folder1", "folder2"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				fromUser("d3", "User1", isDashboard),
				provisioned("d4", "Provisioned3", isDashboard),
			},
			expectedErr: "multiple provisioning folders exist with at least one user-created resource",
		},
		{
			name:               "multiple folders, a user-created folder in one of them",
			provisionedFolders: []string{"folder1", "folder2"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				fromUser("f1", "UserFolder1", isFolder),
			},
			expectedErr: "multiple provisioning folders exist with at least one user-created resource",
		},
		{
			// With a single folder and duplicates present, every configured
			// provisioned dashboard is deleted (the folder is kept).
			name:               "single folder, some dashboards duplicated",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				// Provisioned1 is duplicated.
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned1", isDashboard),
				provisioned("d4", "Provisioned3", isDashboard),
			},
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
			},
		},
		{
			name:               "single folder, duplicated dashboards, user-created dashboards are ignored",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				// Provisioned1 is duplicated.
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				fromUser("d3", "User1", isDashboard),
				provisioned("d4", "Provisioned3", isDashboard),
				provisioned("d5", "Provisioned1", isDashboard),
			},
			// User dashboard (d3) is not deleted.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
				{Type: searchstore.TypeDashboard, UID: "d5"},
			},
		},
		{
			name:               "single folder, duplicated dashboards, user-created folders are ignored",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				// Provisioned1 is duplicated.
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				provisioned("d4", "Provisioned1", isDashboard),
				fromUser("f1", "UserFolder1", isFolder),
			},
			// User folder (f1) is not deleted.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
			},
		},
		{
			// Multiple folders with purely provisioned content are collapsed
			// even without duplicates: dashboards first, folders afterwards.
			name:               "multiple folders, only provisioned dashboards",
			provisionedFolders: []string{"folder1", "folder2"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				provisioned("d4", "Provisioned4", isDashboard),
			},
			// Delete all dashboards, then all folders.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
				{Type: searchstore.TypeFolder, UID: "folder1"},
				{Type: searchstore.TypeFolder, UID: "folder2"},
			},
		},
		{
			name:               "single folder, only deletes dashboards defined in the config file",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned1", isDashboard),
				provisioned("d4", "Provisioned4", isDashboard),
				provisioned("d5", "Provisioned4", isDashboard),
			},
			// Delete duplicated dashboards, but keep Provisioned4, since it's not in the config file.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
			},
		},
		{
			name:               "single folder, no duplicated dashboards",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				provisioned("d4", "Provisioned4", isDashboard),
			},
			expectedSteps: nil, // no duplicates, nothing to do
		},
		{
			name:               "single folder, no duplicated dashboards, multiple user-created resources",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				fromUser("f1", "UserFolder1", isFolder),
				provisioned("d3", "Provisioned3", isDashboard),
				fromUser("d4", "User1", isDashboard),
				provisioned("d5", "Provisioned4", isDashboard),
				fromUser("d6", "User2", isDashboard),
				fromUser("f2", "UserFolder2", isFolder),
			},
			expectedSteps: nil, // no duplicates in the provisioned set, nothing to do
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Convert the configured names into the set shape the function expects.
			provisionedSet := make(map[string]bool)
			for _, name := range tc.configDashboards {
				provisionedSet[name] = true
			}
			steps, err := cleanupSteps(tc.provisionedFolders, tc.provisionedResources, provisionedSet)
			if tc.expectedErr == "" {
				require.NoError(t, err)
				require.Equal(t, tc.expectedSteps, steps)
			} else {
				require.Error(t, err)
				require.Equal(t, tc.expectedErr, err.Error())
			}
		})
	}
}

View File

@@ -202,6 +202,11 @@ func (s *Service) searchFoldersFromApiServer(ctx context.Context, query folder.S
if query.Title != "" {
// allow wildcard search
request.Query = "*" + strings.ToLower(query.Title) + "*"
// or perform exact match if requested
if query.TitleExactMatch {
request.Query = query.Title
}
// if using query, you need to specify the fields you want
request.Fields = dashboardsearch.IncludeFields
}

View File

@@ -224,12 +224,13 @@ type GetFoldersQuery struct {
}
type SearchFoldersQuery struct {
OrgID int64
UIDs []string
IDs []int64
Title string
Limit int64
SignedInUser identity.Requester `json:"-"`
OrgID int64
UIDs []string
IDs []int64
Title string
TitleExactMatch bool
Limit int64
SignedInUser identity.Requester `json:"-"`
}
// GetParentsQuery captures the information required by the folder service to

View File

@@ -153,13 +153,20 @@ func (provider *Provisioner) Provision(ctx context.Context) error {
// CleanUpOrphanedDashboards deletes provisioned dashboards missing a linked reader.
func (provider *Provisioner) CleanUpOrphanedDashboards(ctx context.Context) {
currentReaders := make([]string, len(provider.fileReaders))
configs := make([]dashboards.ProvisioningConfig, len(provider.fileReaders))
for index, reader := range provider.fileReaders {
currentReaders[index] = reader.Cfg.Name
configs[index] = dashboards.ProvisioningConfig{
Name: reader.Cfg.Name,
OrgID: reader.Cfg.OrgID,
Folder: reader.Cfg.Folder,
AllowUIUpdates: reader.Cfg.AllowUIUpdates,
}
}
if err := provider.provisioner.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{ReaderNames: currentReaders}); err != nil {
if err := provider.provisioner.DeleteOrphanedProvisionedDashboards(
ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{Config: configs},
); err != nil {
provider.log.Warn("Failed to delete orphaned provisioned dashboards", "err", err)
}
}

View File

@@ -2,11 +2,8 @@ package migrations
import (
"fmt"
"strings"
"github.com/bwmarrin/snowflake"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/util/xorm"
)
func initResourceTables(mg *migrator.Migrator) string {
@@ -207,142 +204,5 @@ func initResourceTables(mg *migrator.Migrator) string {
Name: "IDX_resource_history_key_path",
}))
mg.AddMigration("resource_history key_path backfill", &ResourceHistoryKeyPathBackfillMigration{})
return marker
}
// ResourceHistoryKeyPathBackfillMigration is a code migration that backfills
// the key_path column of existing resource_history rows in batches.
type ResourceHistoryKeyPathBackfillMigration struct {
	migrator.MigrationBase
}

// SQL returns a human-readable description of this code migration; the actual
// work is performed in Exec.
func (m *ResourceHistoryKeyPathBackfillMigration) SQL(_ migrator.Dialect) string {
	return "resource_history key_path backfill code migration"
}
// Exec backfills key_path for all resource_history rows that still have an
// empty key_path, processing them in keyset-paginated batches (see
// getResourceHistoryRows) until no matching rows remain.
func (m *ResourceHistoryKeyPathBackfillMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
	// Start from a zero-valued cursor row to fetch the first batch.
	rows, err := getResourceHistoryRows(sess, mg, resourceHistoryRow{})
	if err != nil {
		return err
	}
	for len(rows) > 0 {
		if err := updateResourceHistoryKeyPath(sess, rows); err != nil {
			return err
		}
		// Continue from the last row of the previous batch (keyset pagination).
		rows, err = getResourceHistoryRows(sess, mg, rows[len(rows)-1])
		if err != nil {
			return err
		}
	}
	return nil
}
// updateResourceHistoryKeyPath computes and writes key_path for every row in
// the batch that still has an empty key_path, using a single UPDATE with a
// CASE expression keyed by guid.
//
// NOTE(review): the SQL is assembled with fmt.Sprintf rather than bound
// parameters. The interpolated values (guid, key_path) originate from the
// database itself, not user input, but confirm they can never contain single
// quotes before reusing this pattern elsewhere.
func updateResourceHistoryKeyPath(sess *xorm.Session, rows []resourceHistoryRow) error {
	if len(rows) == 0 {
		return nil
	}
	// Collect only the rows that actually need a backfill.
	updates := []resourceHistoryRow{}
	for _, row := range rows {
		if row.KeyPath == "" {
			row.KeyPath = parseKeyPath(row)
			updates = append(updates, row)
		}
	}
	if len(updates) == 0 {
		return nil
	}
	// Build the IN (...) list and the CASE mapping guid -> new key_path.
	guids := ""
	setCases := "CASE"
	for _, row := range updates {
		guids += fmt.Sprintf("'%s',", row.GUID)
		setCases += fmt.Sprintf(" WHEN guid = '%s' THEN '%s'", row.GUID, row.KeyPath)
	}
	guids = strings.TrimRight(guids, ",") // drop trailing comma from the IN list
	setCases += " ELSE key_path END "
	// the query will look like this
	// UPDATE resource_history
	// SET key_path = CASE
	// WHEN guid = '1402de51-669b-4206-8a6c-005a00eee6e3' then 'unified/data/folder.grafana.app/folders/default/cf6lylpvls000c/1998492888241012800~created~'
	// WHEN guid = '8842cc56-f22b-45e1-82b1-99759cd443b3' then 'unified/data/dashboard.grafana.app/dashboards/default/adzvfhp/1998492902577144677~created~cf6lylpvls000c'
	// ELSE key_path END
	// WHERE guid IN ('1402de51-669b-4206-8a6c-005a00eee6e3', '8842cc56-f22b-45e1-82b1-99759cd443b3')
	// AND key_path = '';
	sql := fmt.Sprintf(`
	UPDATE resource_history
	SET key_path = %s
	WHERE guid IN (%s)
	AND key_path = '';
	`, setCases, guids)
	if _, err := sess.Exec(sql); err != nil {
		return err
	}
	return nil
}
// parseKeyPath builds the unified-storage key path for a history row, in the
// form: unified/data/<group>/<resource>/<namespace>/<name>/<snowflake>~<action>~<folder>.
func parseKeyPath(row resourceHistoryRow) string {
	var action string
	switch row.Action {
	case 1:
		action = "created"
	case 2:
		action = "updated"
	case 3:
		action = "deleted"
	}
	// NOTE(review): any other Action value leaves the action segment empty —
	// confirm 1/2/3 are the only values stored in resource_history.
	return fmt.Sprintf("unified/data/%s/%s/%s/%s/%d~%s~%s", row.Group, row.Resource, row.Namespace, row.Name, snowflakeFromRv(row.ResourceVersion), action, row.Folder)
}
// snowflakeFromRv derives a snowflake-style ID from a resource version:
// rv/1000 is treated as a timestamp relative to snowflake.Epoch and shifted
// into the high bits; rv%1000 becomes the low (node+step) bits.
func snowflakeFromRv(rv int64) int64 {
	timestamp := rv / 1000
	remainder := rv % 1000
	highBits := (timestamp - snowflake.Epoch) << (snowflake.NodeBits + snowflake.StepBits)
	return highBits + remainder
}
// resourceHistoryRow mirrors the columns read from (and written back to) the
// resource_history table during the key_path backfill.
type resourceHistoryRow struct {
	GUID            string `xorm:"guid"`
	Group           string `xorm:"group"`
	Resource        string `xorm:"resource"`
	Namespace       string `xorm:"namespace"`
	Name            string `xorm:"name"`
	ResourceVersion int64  `xorm:"resource_version"`
	Action          int64  `xorm:"action"` // 1=created, 2=updated, 3=deleted (see parseKeyPath)
	Folder          string `xorm:"folder"`
	KeyPath         string `xorm:"key_path"`
}
// getResourceHistoryRows fetches the next batch (up to 1000 rows) of
// resource_history rows whose key_path is still empty, using keyset
// pagination on (resource_version, guid) starting strictly after continueRow.
// Pass a zero-valued continueRow to start from the beginning.
func getResourceHistoryRows(sess *xorm.Session, mg *migrator.Migrator, continueRow resourceHistoryRow) ([]resourceHistoryRow, error) {
	var rows []resourceHistoryRow
	// Quote every column name; "group" in particular is a reserved word in
	// several SQL dialects.
	cols := fmt.Sprintf(
		"%s, %s, %s, %s, %s, %s, %s, %s, %s",
		mg.Dialect.Quote("guid"),
		mg.Dialect.Quote("group"),
		mg.Dialect.Quote("resource"),
		mg.Dialect.Quote("namespace"),
		mg.Dialect.Quote("name"),
		mg.Dialect.Quote("resource_version"),
		mg.Dialect.Quote("action"),
		mg.Dialect.Quote("folder"),
		mg.Dialect.Quote("key_path"))
	sql := fmt.Sprintf(`
	SELECT %s
	FROM resource_history
	WHERE (resource_version > %d OR (resource_version = %d AND guid > '%s'))
	AND key_path = ''
	ORDER BY resource_version ASC, guid ASC
	LIMIT 1000;
	`, cols, continueRow.ResourceVersion, continueRow.ResourceVersion, continueRow.GUID)
	if err := sess.SQL(sql).Find(&rows); err != nil {
		return nil, err
	}
	return rows, nil
}

View File

@@ -262,4 +262,57 @@ describe('TabsLayoutManager', () => {
expect(manager.getVizPanels().length).toBe(1);
});
});
  // createFromLayout: converts an existing RowsLayoutManager into tabs —
  // one tab per row, generating unique fallback titles for untitled rows.
  describe('createFromLayout', () => {
    it('should convert rows with titles to tabs', () => {
      const rowsLayout = new RowsLayoutManager({
        rows: [new RowItem({ title: 'Row 1' }), new RowItem({ title: 'Row 2' })],
      });
      const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
      expect(tabsManager.state.tabs).toHaveLength(2);
      expect(tabsManager.state.tabs[0].state.title).toBe('Row 1');
      expect(tabsManager.state.tabs[1].state.title).toBe('Row 2');
    });
    it('should use default title when row has empty title', () => {
      const rowsLayout = new RowsLayoutManager({
        rows: [new RowItem({ title: '' })],
      });
      const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
      expect(tabsManager.state.tabs).toHaveLength(1);
      expect(tabsManager.state.tabs[0].state.title).toBe('New tab');
    });
    it('should generate unique titles for multiple rows with empty titles', () => {
      const rowsLayout = new RowsLayoutManager({
        rows: [new RowItem({ title: '' }), new RowItem({ title: '' }), new RowItem({ title: '' })],
      });
      const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
      // Each untitled row gets a numbered suffix after the first.
      expect(tabsManager.state.tabs).toHaveLength(3);
      expect(tabsManager.state.tabs[0].state.title).toBe('New tab');
      expect(tabsManager.state.tabs[1].state.title).toBe('New tab 1');
      expect(tabsManager.state.tabs[2].state.title).toBe('New tab 2');
    });
    it('should generate unique titles when mixing empty and existing titles', () => {
      const rowsLayout = new RowsLayoutManager({
        rows: [
          new RowItem({ title: 'New row' }), // existing title that matches default
          new RowItem({ title: '' }), // empty, should get unique title
        ],
      });
      const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
      expect(tabsManager.state.tabs).toHaveLength(2);
      expect(tabsManager.state.tabs[0].state.title).toBe('New row');
      expect(tabsManager.state.tabs[1].state.title).toBe('New tab');
    });
  });
});

View File

@@ -410,6 +410,10 @@ export class TabsLayoutManager extends SceneObjectBase<TabsLayoutManagerState> i
let tabs: TabItem[] = [];
if (layout instanceof RowsLayoutManager) {
const existingNames = new Set(
layout.state.rows.map((row) => row.state.title).filter((title): title is string => !!title)
);
for (const row of layout.state.rows) {
if (row.state.repeatSourceKey) {
continue;
@@ -420,10 +424,14 @@ export class TabsLayoutManager extends SceneObjectBase<TabsLayoutManagerState> i
// We need to clear the target since we don't want to point the original row anymore (if it was set)
conditionalRendering?.setTarget(undefined);
const newTitle =
row.state.title || generateUniqueTitle(t('dashboard.tabs-layout.tab.new', 'New tab'), existingNames);
existingNames.add(newTitle);
tabs.push(
new TabItem({
layout: row.state.layout.clone(),
title: row.state.title,
title: newTitle,
conditionalRendering,
repeatByVariable: row.state.repeatByVariable,
})

View File

@@ -256,7 +256,11 @@ export const InfiniteScroll = ({
if (props.visibleStartIndex === 0) {
noScrollRef.current = scrollElement.scrollHeight <= scrollElement.clientHeight;
}
if (noScrollRef.current || infiniteLoaderState === 'loading' || infiniteLoaderState === 'out-of-bounds') {
if (noScrollRef.current) {
setInfiniteLoaderState('idle');
return;
}
if (infiniteLoaderState === 'loading' || infiniteLoaderState === 'out-of-bounds') {
return;
}
const lastLogIndex = logs.length - 1;
@@ -267,7 +271,7 @@ export const InfiniteScroll = ({
setInfiniteLoaderState('idle');
}
},
[infiniteLoaderState, logs.length, scrollElement]
[infiniteLoaderState, logs, scrollElement]
);
const getItemKey = useCallback((index: number) => (logs[index] ? logs[index].uid : index.toString()), [logs]);

View File

@@ -1,5 +1,5 @@
import { css } from '@emotion/css';
import { useState, useEffect, useCallback, useMemo } from 'react';
import { Fragment, useState, useEffect, useCallback, useMemo } from 'react';
import { useAsync, useMeasure } from 'react-use';
import {
@@ -133,9 +133,9 @@ export function VisualizationSuggestions({ onChange, data, panel }: Props) {
return (
<div className={styles.grid}>
{isNewVizSuggestionsEnabled
? suggestionsByVizType.map(([vizType, vizTypeSuggestions]) => (
<>
<div className={styles.vizTypeHeader} key={vizType?.id || 'unknown-viz-type'}>
? suggestionsByVizType.map(([vizType, vizTypeSuggestions], groupIndex) => (
<Fragment key={vizType?.id || `unknown-viz-type-${groupIndex}`}>
<div className={styles.vizTypeHeader}>
<Text variant="body" weight="medium">
{vizType?.info && <img className={styles.vizTypeLogo} src={vizType.info.logos.small} alt="" />}
{vizType?.name || t('panel.visualization-suggestions.unknown-viz-type', 'Unknown visualization type')}
@@ -190,7 +190,7 @@ export function VisualizationSuggestions({ onChange, data, panel }: Props) {
</div>
);
})}
</>
</Fragment>
))
: suggestions?.map((suggestion, index) => (
<div key={suggestion.hash} className={styles.cardContainer} ref={index === 0 ? firstCardRef : undefined}>