Compare commits


13 Commits

Author SHA1 Message Date
Tom Ratcliffe
688d6746c9 --wip-- 2025-12-09 14:17:06 +00:00
Marc M.
e8f1eb1ee8 Sidebar: Fix resizing (#114597) 2025-12-02 11:51:26 +01:00
Peter Štibraný
c0f8e5688b Delete pkg/services/sqlstore/migrator/snapshot directory (#114687)
This is no longer used.
2025-12-02 11:41:53 +01:00
Sonia Aguilar
6e7f28f5a1 Alerting: Fix label values not being shown in the label drop down (#114642)
* fix label values not being shown in the label drop down

* update fetching gops label values

* improve tests

* add test
2025-12-02 10:19:25 +00:00
Sonia Aguilar
9dcad9c255 Alerting: Update prompt for the 'Analyze rule with assistant' button (#114593)
* Update analyze rule prompt

* Conditionally apply alerts prompt

---------

Co-authored-by: Konrad Lalik <konradlalik@gmail.com>
2025-12-02 11:12:24 +01:00
Sonia Aguilar
f8f4fb5640 Alerting: Fix advanced filter not preserving freewords filter in the list view (#114651)
fix custom advanced filter not preserving freewords filter in the list view
2025-12-02 10:59:47 +01:00
Charandas
d5de92e5b2 fix: propagate service identity correctly for requester (#114423)
* fix: propagate service identity correctly for requester

* revert
2025-12-02 10:38:37 +01:00
Paul Marbach
e36ea78771 Suggestions: Deprecate the old API and put external suggestions behind a flag (#114127)
* Suggestions: Deprecate previous API, enable external plugin suggestions behind flag

* fix types for deprecated builder

* restore some support for cloud-onboarding

* add support for cloud-onboarding usage, add test to ensure it keeps working

* refactor to not hardcode on 'core:'

* remove unused import
2025-12-01 23:22:22 +00:00
Steve Simpson
b332a108f3 Alerting: Notification history query API. (#114677)
* Alerting: Notification history query API.

First cut at defining a namespace-scoped route on the historian.alerting app
to query notification history.

* Address review comments
2025-12-02 00:14:54 +01:00
Todd Treece
1060dd538a CI: Run lint on self-hosted ubuntu-x64-small (#114674) 2025-12-01 22:27:14 +00:00
Todd Treece
be8076dee8 CI: Run lint on ubuntu-latest-8-cores (#114673) 2025-12-01 21:40:46 +00:00
Ashley Harrison
7f1ac6188a PanelChrome: Wrapping div needs height: 100% as well (#114655)
wrapping div needs height: 100% as well
2025-12-01 17:39:15 +00:00
Rafael Bortolon Paulovic
31eaf1e898 chore: add log and metric before unified migration enforcement (#114598) 2025-12-01 17:56:59 +01:00
111 changed files with 2639 additions and 2757 deletions

View File

@@ -57,7 +57,7 @@ jobs:
lint-go:
needs: detect-changes
if: needs.detect-changes.outputs.changed == 'true'
runs-on: ubuntu-latest
runs-on: ubuntu-x64-large-io
steps:
- uses: actions/checkout@v5
with:

View File

@@ -1,34 +1,20 @@
package kinds
import (
"github.com/grafana/grafana/apps/alerting/historian/kinds/v0alpha1"
)
manifest: {
appName: "alerting-historian"
groupOverride: "historian.alerting.grafana.app"
versions: {
"v0alpha1": v0alpha1
"v0alpha1": {
kinds: [dummyv0alpha1]
routes: v0alpha1.routes
}
}
}
v0alpha1: {
kinds: [dummyv0alpha1]
routes: {
namespaced: {
// This endpoint is an exact copy of the existing /history endpoint,
// with the exception that error responses will be Kubernetes-style,
// not Grafana-style. It will be replaced in the future with a better
// more schema-friendly API.
"/alertstate/history": {
"GET": {
response: {
body: [string]: _
}
responseMetadata: typeMeta: false
}
}
}
}
}
dummyv0alpha1: {
kind: "Dummy"
schema: {
@@ -37,4 +23,4 @@ dummyv0alpha1: {
dummyField: int
}
}
}
}

View File

@@ -0,0 +1,9 @@
package v0alpha1
#Matcher: {
type: "=" | "!=" | "=~" | "!~" @cuetsy(kind="enum",memberNames="Equal|NotEqual|EqualRegex|NotEqualRegex")
label: string
value: string
}
#Matchers: [...#Matcher]

View File

@@ -0,0 +1,65 @@
package v0alpha1
import (
"time"
)
#NotificationStatus: "firing" | "resolved" @cog(kind="enum",memberNames="Firing|Resolved")
#NotificationOutcome: "success" | "error" @cog(kind="enum",memberNames="Success|Error")
#NotificationQuery: {
// From is the starting timestamp for the query.
from?: time.Time
// To is the ending timestamp for the query.
to?: time.Time
// Limit is the maximum number of entries to return.
limit?: int64
// Receiver optionally filters the entries by receiver title (contact point).
receiver?: string
// Status optionally filters the entries to only either firing or resolved.
status?: #NotificationStatus
// Outcome optionally filters the entries to only either successful or failed attempts.
outcome?: #NotificationOutcome
// RuleUID optionally filters the entries to a specific alert rule.
ruleUID?: string
// GroupLabels optionally filters the entries by matching group labels.
groupLabels?: #Matchers
}
#NotificationQueryResult: {
entries: [...#NotificationEntry]
}
#NotificationEntry: {
// Timestamp is the time at which the notification attempt completed.
timestamp: time.Time
// Receiver is the receiver (contact point) title.
receiver: string
// Status indicates if the notification contains one or more firing alerts.
status: #NotificationStatus
// Outcome indicates if the notification attempt was successful or if it failed.
outcome: #NotificationOutcome
// GroupLabels are the labels uniquely identifying the alert group within a route.
groupLabels: [string]: string
// Alerts are the alerts grouped into the notification.
alerts: [...#NotificationEntryAlert]
// Retry indicates if the attempt was a retried attempt.
retry: bool
// Error is the message returned by the contact point if delivery failed.
error?: string
// Duration is the length of time the notification attempt took in nanoseconds.
duration: int
// PipelineTime is the time at which the flush began.
pipelineTime: time.Time
// GroupKey uniquely identifies the dispatcher alert group.
groupKey: string
}
#NotificationEntryAlert: {
status: string
labels: [string]: string
annotations: [string]: string
startsAt: time.Time
endsAt: time.Time
}

View File

@@ -0,0 +1,29 @@
package v0alpha1
routes: {
namespaced: {
// This endpoint is an exact copy of the existing /history endpoint,
// with the exception that error responses will be Kubernetes-style,
// not Grafana-style. It will be replaced in the future with a better
// more schema-friendly API.
"/alertstate/history": {
"GET": {
response: {
body: [string]: _
}
responseMetadata: typeMeta: false
}
}
// Query notification history.
"/notification/query": {
"POST": {
request: {
body: #NotificationQuery
}
response: #NotificationQueryResult
responseMetadata: typeMeta: false
}
}
}
}

View File

@@ -0,0 +1,67 @@
// Code generated - EDITING IS FUTILE. DO NOT EDIT.
package v0alpha1
import (
time "time"
)
type CreateNotificationqueryRequestNotificationStatus string
const (
CreateNotificationqueryRequestNotificationStatusFiring CreateNotificationqueryRequestNotificationStatus = "firing"
CreateNotificationqueryRequestNotificationStatusResolved CreateNotificationqueryRequestNotificationStatus = "resolved"
)
type CreateNotificationqueryRequestNotificationOutcome string
const (
CreateNotificationqueryRequestNotificationOutcomeSuccess CreateNotificationqueryRequestNotificationOutcome = "success"
CreateNotificationqueryRequestNotificationOutcomeError CreateNotificationqueryRequestNotificationOutcome = "error"
)
type CreateNotificationqueryRequestMatchers []CreateNotificationqueryRequestMatcher
type CreateNotificationqueryRequestMatcher struct {
Type CreateNotificationqueryRequestMatcherType `json:"type"`
Label string `json:"label"`
Value string `json:"value"`
}
// NewCreateNotificationqueryRequestMatcher creates a new CreateNotificationqueryRequestMatcher object.
func NewCreateNotificationqueryRequestMatcher() *CreateNotificationqueryRequestMatcher {
return &CreateNotificationqueryRequestMatcher{}
}
type CreateNotificationqueryRequestBody struct {
// From is the starting timestamp for the query.
From *time.Time `json:"from,omitempty"`
// To is the starting timestamp for the query.
To *time.Time `json:"to,omitempty"`
// Limit is the maximum number of entries to return.
Limit *int64 `json:"limit,omitempty"`
// Receiver optionally filters the entries by receiver title (contact point).
Receiver *string `json:"receiver,omitempty"`
// Status optionally filters the entries to only either firing or resolved.
Status *CreateNotificationqueryRequestNotificationStatus `json:"status,omitempty"`
// Outcome optionally filters the entries to only either successful or failed attempts.
Outcome *CreateNotificationqueryRequestNotificationOutcome `json:"outcome,omitempty"`
// RuleUID optionally filters the entries to a specific alert rule.
RuleUID *string `json:"ruleUID,omitempty"`
// GroupLabels optionally filters the entries by matching group labels.
GroupLabels *CreateNotificationqueryRequestMatchers `json:"groupLabels,omitempty"`
}
// NewCreateNotificationqueryRequestBody creates a new CreateNotificationqueryRequestBody object.
func NewCreateNotificationqueryRequestBody() *CreateNotificationqueryRequestBody {
return &CreateNotificationqueryRequestBody{}
}
type CreateNotificationqueryRequestMatcherType string
const (
CreateNotificationqueryRequestMatcherTypeEqual CreateNotificationqueryRequestMatcherType = "="
CreateNotificationqueryRequestMatcherTypeNotEqual CreateNotificationqueryRequestMatcherType = "!="
CreateNotificationqueryRequestMatcherTypeEqualRegex CreateNotificationqueryRequestMatcherType = "=~"
CreateNotificationqueryRequestMatcherTypeNotEqualRegex CreateNotificationqueryRequestMatcherType = "!~"
)
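For orientation, here is a minimal sketch (not part of this change) of building a query with the generated request types above and marshaling it to JSON. The import path is taken from the app package further down in this diff; the `alertname=HighCPU` matcher is an invented placeholder.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/grafana/grafana/apps/alerting/historian/pkg/apis/alertinghistorian/v0alpha1"
)

func main() {
	// Query the last hour of firing notifications for one alert group,
	// capped at 100 entries. All filter fields are optional pointers.
	from := time.Now().Add(-time.Hour)
	limit := int64(100)
	status := v0alpha1.CreateNotificationqueryRequestNotificationStatusFiring
	matchers := v0alpha1.CreateNotificationqueryRequestMatchers{
		{
			Type:  v0alpha1.CreateNotificationqueryRequestMatcherTypeEqual,
			Label: "alertname",
			Value: "HighCPU", // placeholder label value
		},
	}

	body := v0alpha1.CreateNotificationqueryRequestBody{
		From:        &from,
		Limit:       &limit,
		Status:      &status,
		GroupLabels: &matchers,
	}

	payload, err := json.Marshal(body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // unset filters are dropped via omitempty
}
```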

View File

@@ -0,0 +1,86 @@
// Code generated - EDITING IS FUTILE. DO NOT EDIT.
package v0alpha1
import (
time "time"
)
// +k8s:openapi-gen=true
type NotificationEntry struct {
// Timestamp is the time at which the notification attempt completed.
Timestamp time.Time `json:"timestamp"`
// Receiver is the receiver (contact point) title.
Receiver string `json:"receiver"`
// Status indicates if the notification contains one or more firing alerts.
Status NotificationStatus `json:"status"`
// Outcome indicates if the notification attempt was successful or if it failed.
Outcome NotificationOutcome `json:"outcome"`
// GroupLabels are the labels uniquely identifying the alert group within a route.
GroupLabels map[string]string `json:"groupLabels"`
// Alerts are the alerts grouped into the notification.
Alerts []NotificationEntryAlert `json:"alerts"`
// Retry indicates if the attempt was a retried attempt.
Retry bool `json:"retry"`
// Error is the message returned by the contact point if delivery failed.
Error *string `json:"error,omitempty"`
// Duration is the length of time the notification attempt took in nanoseconds.
Duration int64 `json:"duration"`
// PipelineTime is the time at which the flush began.
PipelineTime time.Time `json:"pipelineTime"`
// GroupKey uniquely identifies the dispatcher alert group.
GroupKey string `json:"groupKey"`
}
// NewNotificationEntry creates a new NotificationEntry object.
func NewNotificationEntry() *NotificationEntry {
return &NotificationEntry{
GroupLabels: map[string]string{},
Alerts: []NotificationEntryAlert{},
}
}
// +k8s:openapi-gen=true
type NotificationStatus string
const (
NotificationStatusFiring NotificationStatus = "firing"
NotificationStatusResolved NotificationStatus = "resolved"
)
// +k8s:openapi-gen=true
type NotificationOutcome string
const (
NotificationOutcomeSuccess NotificationOutcome = "success"
NotificationOutcomeError NotificationOutcome = "error"
)
// +k8s:openapi-gen=true
type NotificationEntryAlert struct {
Status string `json:"status"`
Labels map[string]string `json:"labels"`
Annotations map[string]string `json:"annotations"`
StartsAt time.Time `json:"startsAt"`
EndsAt time.Time `json:"endsAt"`
}
// NewNotificationEntryAlert creates a new NotificationEntryAlert object.
func NewNotificationEntryAlert() *NotificationEntryAlert {
return &NotificationEntryAlert{
Labels: map[string]string{},
Annotations: map[string]string{},
}
}
// +k8s:openapi-gen=true
type CreateNotificationquery struct {
Entries []NotificationEntry `json:"entries"`
}
// NewCreateNotificationquery creates a new CreateNotificationquery object.
func NewCreateNotificationquery() *CreateNotificationquery {
return &CreateNotificationquery{
Entries: []NotificationEntry{},
}
}
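Correspondingly, a hedged sketch of decoding a response into the generated `CreateNotificationquery` type; the entry values below are invented sample data shaped only by the JSON tags above.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/grafana/grafana/apps/alerting/historian/pkg/apis/alertinghistorian/v0alpha1"
)

func main() {
	// Invented sample payload matching the NotificationEntry JSON tags.
	raw := []byte(`{
		"entries": [{
			"timestamp": "2025-12-01T23:00:00Z",
			"receiver": "ops-email",
			"status": "firing",
			"outcome": "success",
			"groupLabels": {"alertname": "HighCPU"},
			"alerts": [],
			"retry": false,
			"duration": 120000000,
			"pipelineTime": "2025-12-01T22:59:59Z",
			"groupKey": "{}/{}:{alertname=\"HighCPU\"}"
		}]
	}`)

	var result v0alpha1.CreateNotificationquery
	if err := json.Unmarshal(raw, &result); err != nil {
		panic(err)
	}
	for _, e := range result.Entries {
		fmt.Printf("%s -> %s (%s/%s)\n", e.Timestamp, e.Receiver, e.Status, e.Outcome)
	}
}
```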

View File

@@ -92,9 +92,321 @@ var appManifestData = app.ManifestData{
},
},
},
"/notification/query": {
Post: &spec3.Operation{
OperationProps: spec3.OperationProps{
OperationId: "createNotificationquery",
RequestBody: &spec3.RequestBody{
RequestBodyProps: spec3.RequestBodyProps{
Content: map[string]*spec3.MediaType{
"application/json": {
MediaTypeProps: spec3.MediaTypeProps{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"from": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "date-time",
Description: "From is the starting timestamp for the query.",
},
},
"groupLabels": {
SchemaProps: spec.SchemaProps{
Description: "GroupLabels optionally filters the entries by matching group labels.",
Ref: spec.MustCreateRef("#/components/schemas/createNotificationqueryMatchers"),
},
},
"limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Description: "Limit is the maximum number of entries to return.",
},
},
"outcome": {
SchemaProps: spec.SchemaProps{
Description: "Outcome optionally filters the entries to only either successful or failed attempts.",
Ref: spec.MustCreateRef("#/components/schemas/createNotificationqueryNotificationOutcome"),
},
},
"receiver": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Description: "Receiver optionally filters the entries by receiver title (contact point).",
},
},
"ruleUID": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Description: "RuleUID optionally filters the entries to a specific alert rule.",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status optionally filters the entries to only either firing or resolved.",
Ref: spec.MustCreateRef("#/components/schemas/createNotificationqueryNotificationStatus"),
},
},
"to": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "date-time",
Description: "To is the starting timestamp for the query.",
},
},
},
}},
}},
},
}},
Responses: &spec3.Responses{
ResponsesProps: spec3.ResponsesProps{
Default: &spec3.Response{
ResponseProps: spec3.ResponseProps{
Description: "Default OK response",
Content: map[string]*spec3.MediaType{
"application/json": {
MediaTypeProps: spec3.MediaTypeProps{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"entries": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
},
},
},
Required: []string{
"entries",
},
}},
}},
},
},
},
}},
},
},
},
},
Cluster: map[string]spec3.PathProps{},
Schemas: map[string]spec.Schema{},
Schemas: map[string]spec.Schema{
"createNotificationqueryMatcher": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"label": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"type": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Enum: []interface{}{
"=",
"!=",
"=~",
"!~",
},
},
},
"value": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
},
Required: []string{
"type",
"label",
"value",
},
},
},
"createNotificationqueryMatchers": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
},
},
"createNotificationqueryNotificationEntry": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"alerts": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Description: "Alerts are the alerts grouped into the notification.",
},
},
"duration": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Description: "Duration is the length of time the notification attempt took in nanoseconds.",
},
},
"error": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Description: "Error is the message returned by the contact point if delivery failed.",
},
},
"groupKey": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Description: "GroupKey uniquely idenifies the dispatcher alert group.",
},
},
"groupLabels": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Description: "GroupLabels are the labels uniquely identifying the alert group within a route.",
AdditionalProperties: &spec.SchemaOrBool{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
},
},
},
"outcome": {
SchemaProps: spec.SchemaProps{
Description: "Outcome indicaes if the notificaion attempt was successful or if it failed.",
Ref: spec.MustCreateRef("#/components/schemas/createNotificationqueryNotificationOutcome"),
},
},
"pipelineTime": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "date-time",
Description: "PipelineTime is the time at which the flush began.",
},
},
"receiver": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Description: "Receiver is the receiver (contact point) title.",
},
},
"retry": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Description: "Retry indicates if the attempt was a retried attempt.",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status indicates if the notification contains one or more firing alerts.",
Ref: spec.MustCreateRef("#/components/schemas/createNotificationqueryNotificationStatus"),
},
},
"timestamp": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "date-time",
Description: "Timestamp is the time at which the notification attempt completed.",
},
},
},
Required: []string{
"timestamp",
"receiver",
"status",
"outcome",
"groupLabels",
"alerts",
"retry",
"duration",
"pipelineTime",
"groupKey",
},
},
},
"createNotificationqueryNotificationEntryAlert": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"annotations": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
},
},
},
"endsAt": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "date-time",
},
},
"labels": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
},
},
},
"startsAt": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "date-time",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
},
Required: []string{
"status",
"labels",
"annotations",
"startsAt",
"endsAt",
},
},
},
"createNotificationqueryNotificationOutcome": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Enum: []interface{}{
"success",
"error",
},
},
},
"createNotificationqueryNotificationStatus": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Enum: []interface{}{
"firing",
"resolved",
},
},
},
},
},
},
},
@@ -120,7 +432,8 @@ func ManifestGoTypeAssociator(kind, version string) (goType resource.Kind, exist
}
var customRouteToGoResponseType = map[string]any{
"v0alpha1||<namespace>/alertstate/history|GET": v0alpha1.GetAlertstatehistory{},
"v0alpha1||<namespace>/alertstate/history|GET": v0alpha1.GetAlertstatehistory{},
"v0alpha1||<namespace>/notification/query|POST": v0alpha1.CreateNotificationquery{},
}
// ManifestCustomRouteResponsesAssociator returns the associated response go type for a given kind, version, custom route path, and method, if one exists.
@@ -145,7 +458,9 @@ func ManifestCustomRouteQueryAssociator(kind, version, path, verb string) (goTyp
return goType, exists
}
var customRouteToGoRequestBodyType = map[string]any{}
var customRouteToGoRequestBodyType = map[string]any{
"v0alpha1||<namespace>/notification/query|POST": v0alpha1.CreateNotificationqueryRequestBody{},
}
func ManifestCustomRouteRequestBodyAssociator(kind, version, path, verb string) (goType any, exists bool) {
if len(path) > 0 && path[0] == '/' {

View File

@@ -1,8 +1,13 @@
package app
import (
"context"
"net/http"
"github.com/grafana/grafana-app-sdk/app"
"github.com/grafana/grafana-app-sdk/simple"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/grafana/grafana/apps/alerting/historian/pkg/apis/alertinghistorian/v0alpha1"
"github.com/grafana/grafana/apps/alerting/historian/pkg/app/config"
@@ -21,6 +26,11 @@ func New(cfg app.Config) (app.App, error) {
Path: "/alertstate/history",
Method: "GET",
}: runtimeConfig.GetAlertStateHistoryHandler,
{
Namespaced: true,
Path: "/notification/query",
Method: "POST",
}: UnimplementedHandler,
},
},
// TODO: Remove when SDK is fixed.
@@ -43,3 +53,13 @@ func New(cfg app.Config) (app.App, error) {
return a, nil
}
func UnimplementedHandler(ctx context.Context, writer app.CustomRouteResponseWriter, request *app.CustomRouteRequest) error {
return &apierrors.StatusError{
ErrStatus: metav1.Status{
Status: metav1.StatusFailure,
Code: http.StatusUnprocessableEntity,
Message: "unimplemented",
},
}
}
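Once a real handler replaces `UnimplementedHandler`, the route could be exercised roughly as below. The URL is an assumption: it follows the usual Kubernetes-style layout for namespaced custom routes under the group and version declared in the manifest (`historian.alerting.grafana.app/v0alpha1`), with `default` as a placeholder namespace.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed mount point for the namespaced /notification/query route
	// declared in the manifest above.
	url := "http://localhost:3000/apis/historian.alerting.grafana.app/v0alpha1/namespaces/default/notification/query"

	body := bytes.NewBufferString(`{"limit": 10, "status": "firing"}`)
	resp, err := http.Post(url, "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Against the current UnimplementedHandler this prints a 422
	// Kubernetes-style Status object with message "unimplemented".
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(out))
}
```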

View File

@@ -53,6 +53,7 @@ pluginMetaV0Alpha1: {
skipDataQuery?: bool
state?: "alpha" | "beta"
streaming?: bool
suggestions?: bool
tracing?: bool
iam?: #IAM
// +listType=atomic

View File

@@ -40,6 +40,7 @@ type PluginMetaJSONData struct {
SkipDataQuery *bool `json:"skipDataQuery,omitempty"`
State *PluginMetaJSONDataState `json:"state,omitempty"`
Streaming *bool `json:"streaming,omitempty"`
Suggestions *bool `json:"suggestions,omitempty"`
Tracing *bool `json:"tracing,omitempty"`
Iam *PluginMetaIAM `json:"iam,omitempty"`
// +listType=atomic
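As a small illustration of what the new field emits in plugin.json, a self-contained sketch with a trimmed-down stand-in struct (the real `PluginMetaJSONData` lives in a package not shown in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pluginMeta is a stand-in for the metadata type above, kept to the one
// field this change adds.
type pluginMeta struct {
	Suggestions *bool `json:"suggestions,omitempty"`
}

func main() {
	on := true
	b, _ := json.Marshal(pluginMeta{Suggestions: &on})
	fmt.Println(string(b)) // {"suggestions":true}
}
```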

File diff suppressed because one or more lines are too long

View File

@@ -341,6 +341,10 @@
"type": "boolean",
"description": "Initialize plugin on startup. By default, the plugin initializes on first use, but when preload is set to true the plugin loads when the Grafana web app loads the first time. Only applicable to app plugins. When setting to `true`, implement [frontend code splitting](https://grafana.com/developers/plugin-tools/get-started/best-practices#app-plugins) to minimise performance implications."
},
"suggestions": {
"type": "boolean",
"description": "For panel plugins. If set to true, the plugin's suggestions supplier will be invoked and any suggestions returned will be included in the Suggestions pane in the Panel Editor."
},
"queryOptions": {
"type": "object",
"description": "For data source plugins. There is a query options section in the plugin's query editor and these options can be turned on if needed.",

View File

@@ -1,68 +0,0 @@
---
aliases:
- ../../../panels-visualizations/query-transform-data/ # /docs/grafana/next/panels-visualizations/query-transform-data/
- ../../../panels-visualizations/query-transform-data/expression-queries/ # /docs/grafana/next/panels-visualizations/query-transform-data/expression-queries/
- ../../../panels/query-a-data-source/use-expressions-to-manipulate-data/ # /docs/grafana/next/panels/query-a-data-source/use-expressions-to-manipulate-data/
- ../../../panels/query-a-data-source/use-expressions-to-manipulate-data/about-expressions/ # /docs/grafana/next/panels/query-a-data-source/use-expressions-to-manipulate-data/about-expressions/
- ../../../panels/query-a-data-source/use-expressions-to-manipulate-data/write-an-expression/ # /docs/grafana/next/panels/query-a-data-source/use-expressions-to-manipulate-data/write-an-expression/
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Expressions
title: Grafana expressions
description: Write server-side expressions to manipulate data using math and other operations
weight: 40
refs:
no-data-and-error-handling:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/#configure-no-data-and-error-handling
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/#configure-no-data-and-error-handling
multiple-dimensional-data:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/timeseries-dimensions/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/timeseries-dimensions/
grafana-alerting:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
labels:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/timeseries-dimensions/#labels
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/timeseries-dimensions/#labels
---
# Grafana expressions
An expression is a server-side operation that takes query results from one or more data sources and transforms them into new data. Expressions perform calculations like math operations, aggregations, or timestamp alignments without modifying the original data source results. This lets you derive metrics, combine data from different sources, and perform transformations your data sources can't do on their own.
By running on the server, expressions also enable features like alerting to continue working even when no user is viewing a dashboard.
## What problems do expressions solve?
Expressions fill the gap between what your data sources can produce and what your visualizations or alerts need.
They address several common challenges:
- **Cross-data-source calculations:** Combine results from different data sources that can't query each other directly. For example, calculate error rates by dividing HTTP errors from Prometheus by total requests from an SQL database.
- **Derived metrics:** Compute values your data source doesn't provide, such as percentage changes, moving averages, ratios, or conditional logic based on thresholds.
- **Alerting on complex conditions:** Apply math, reductions, and comparisons to drive alert rules when your data source lacks the necessary functions or when you need to alert across multiple data sources.
- **Post-query transformations:** Align timestamps between series, resample data to consistent intervals, filter out non-numeric values, or reduce time series to single summary values.
- **Multi-dimensional data operations:** Perform calculations across multiple series while preserving their label identities. For example, apply the same formula to dozens of host metrics without writing individual queries for each host.
- **Label-based series matching:** Automatically join and combine series based on their labels. For example, match CPU metrics and memory metrics for the same hosts by joining on common labels like `host` or `region`.
- **Data quality handling:** Clean your data by filtering out, replacing, or detecting problematic values such as null, NaN, or infinity values before performing calculations or creating alerts.
Without expressions, you'd need to modify your data source queries (when possible), use client-side transformations (which don't work for alerting), or export and process data externally.
## Get started
Explore these resources to start using expressions:
- [Create and use expressions](create-use-expressions/) - Learn how to create expressions and use Math, Reduce, and Resample operations.
- [Expression examples](expression-examples/) - Practical examples from basic to advanced for common monitoring scenarios.
- [Troubleshoot expressions](troubleshoot-expressions/) - Debug and resolve common expression issues.

View File

@@ -1,254 +0,0 @@
---
aliases:
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Create and use expressions
title: Create and use expressions
description: Learn how to create expressions and use Math, Reduce, and Resample operations
weight: 41
refs:
multiple-dimensional-data:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/timeseries-dimensions/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/timeseries-dimensions/
grafana-alerting:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
---
# Create and use expressions
Expressions are most commonly used for [Grafana Alerting](ref:grafana-alerting), where server-side processing ensures alerts continue working even when no user is viewing a dashboard.
You can also use expressions with backend data sources in visualizations.
## Understand expression data
Before creating expressions, understand the data types and special values you'll work with.
### Data types
Expressions work with two types of data from backend data sources:
- **Time series:** Collections of timestamped values, typically returned by time series databases like Prometheus or InfluxDB.
- **Numbers:** Individual numeric values, such as aggregated results from SQL queries or reduced time series.
Expressions also operate on [multiple-dimensional data](ref:multiple-dimensional-data), where each series or number is identified by labels or tags.
For example, a single query can return CPU metrics for multiple hosts, with each series labeled by its hostname.
### Special values
When working with expressions, you'll encounter special values that represent problematic or undefined data:
- **null:** Represents missing or absent data. Common when a data point doesn't exist or wasn't recorded.
- **NaN (Not a Number):** Represents an undefined or invalid mathematical result, such as dividing zero by zero or taking the logarithm of a negative number. NaN is unique because it doesn't equal itself, which is why expressions include the `is_nan()` function.
- **Infinity (Inf):** Represents values too large to express as finite numbers. Can be positive (`Inf`) or negative (`-Inf`). Often results from dividing a nonzero value by zero.
Expressions provide functions like `is_null()`, `is_nan()`, `is_inf()`, and `is_number()` to detect and handle these special values in your data.
### Reference queries and expressions
Each query or expression in Grafana has a unique identifier called a RefID (Reference ID).
RefIDs appear as letters (`A`, `B`, `C`) or custom names in the query editor, and they let you reference the output of one query in another expression.
To use a query or expression in a math operation, prefix its RefID with a dollar sign: `$A`, `$B`, `$C`.
**Example:**
If query `A` returns CPU usage and query `B` returns CPU capacity, you can create an expression `$A / $B * 100` to calculate CPU percentage.
The expression automatically uses the data from queries A and B based on their RefIDs.
## Create an expression
To add an expression to a panel:
1. Open the panel in edit mode.
1. Below your existing queries, click **Expression**.
1. In the **Operation** field, select **Math**, **Reduce**, or **Resample**.
1. Configure the expression based on the operation type.
1. Click **Apply** to save your changes.
The expression appears in your query list with its own RefID and can be referenced by other expressions.
## Expression operations
Expressions provide three core operations that you can combine to transform your data: Math, Reduce, and Resample.
Each operation solves specific data transformation challenges.
### Math
Math operations let you perform calculations on your query results using standard arithmetic, comparison, and logical operators.
Use math expressions to derive new metrics, calculate percentages, or implement conditional logic.
**Common use cases:**
- Calculate error rates: `$errors / $total_requests * 100`
- Convert units: `$bytes / 1024 / 1024` (bytes to megabytes)
- Implement thresholds: `$cpu_usage > 80` (returns 1 for true, 0 for false)
- Calculate capacity remaining: `$max_capacity - $current_usage`
#### Syntax and operators
Reference queries and expressions using their RefID prefixed with a dollar sign: `$A`, `$B`, `$C`.
If a RefID contains spaces, use brace syntax: `${my query}`.
**Supported operators:**
- **Arithmetic:** `+`, `-`, `*`, `/`, `%` (modulo), `**` (exponent)
- **Comparison:** `<`, `>`, `==`, `!=`, `>=`, `<=` (return 1 for true, 0 for false)
- **Logical:** `&&` (and), `||` (or), `!` (not)
**Numeric constants:**
- Decimal: `2.24`, `-0.8e-2`
- Octal: `072` (leading zero)
- Hexadecimal: `0x2A` (leading 0x)
#### How operations work with different data types
Math operations behave differently depending on whether you're working with numbers or time series:
- **Number + Number:** Performs the operation on the two values. Example: `5 + 3 = 8`
- **Number + Time series:** Applies the operation to every point in the series. Example: `$cpu_series * 100` multiplies each CPU value by 100
- **Time series + Time series:** Performs the operation on matching timestamps. Example: `$series_A + $series_B` adds values at each timestamp that exists in both series
If time series have different timestamps, use the Resample operation to align them first.
#### Label-based series matches
When working with multiple series, expressions automatically match series based on their labels.
If query `$A` returns CPU usage for multiple hosts (each with a `{host=...}` label) and query `$B` returns memory usage for the same hosts, the expression `$A + $B` automatically matches each host's CPU and memory values.
**Matching rules:**
- Series with identical labels match automatically
- A series with no labels matches any other series
- Series with subset labels match (for example, `{host=web01}` matches `{host=web01, region=us-east}`)
- If both variables contain only one series, they always match
#### Available functions
Math expressions include functions for common operations and data quality checks.
All functions work with both individual numbers and time series.
**Mathematical functions:**
- `abs(x)` - Returns absolute value. Example: `abs($temperature_diff)`
- `log(x)` - Returns natural logarithm. Returns NaN for negative values. Example: `log($growth_rate)`
- `round(x)` - Rounds to nearest integer. Example: `round($average)`
- `ceil(x)` - Rounds up to nearest integer. Example: `ceil(3.2)` returns `4`
- `floor(x)` - Rounds down to nearest integer. Example: `floor(3.8)` returns `3`
**Data quality functions:**
These functions help you detect and handle problematic values in your data:
- `is_number(x)` - Returns 1 for valid numbers, 0 for null, NaN, or infinity. Example: `is_number($A)`
- `is_null(x)` - Returns 1 for null values, 0 otherwise. Example: `is_null($A)`
- `is_nan(x)` - Returns 1 for NaN values, 0 otherwise. Useful because NaN doesn't equal itself. Example: `is_nan($A)`
- `is_inf(x)` - Returns 1 for positive or negative infinity, 0 otherwise. Example: `is_inf($A)`
**Test functions:**
- `null()`, `nan()`, `inf()`, `infn()` - Return the named special value. Primarily for testing.
### Reduce
Reduce operations convert time series into single numeric values while preserving their labels.
Use reduce to create summary statistics, single-value panels, or alert conditions based on time series data.
**Common use cases:**
- Create alert thresholds: Reduce CPU time series to average and alert if it exceeds 80%
- Display current values: Show the last recorded temperature from a sensor
- Calculate totals: Sum all errors across a time range
- Find extremes: Identify maximum memory usage in the last hour
**Available reduction functions:**
- **Last:** Returns the most recent value. Useful for "current state" displays.
- **Mean:** Returns the average of all values. Use for typical behavior over time.
- **Min / Max:** Returns the smallest or largest value. Useful for capacity planning or finding anomalies.
- **Sum:** Returns the total of all values. Useful for counting events or totaling metrics.
- **Count:** Returns the number of data points. Useful for checking data completeness.
**Example:**
If query `$A` returns CPU usage time series for three hosts over the last hour, applying `Reduce(Mean)` produces three numbers: the average CPU for each host, each labeled with its hostname.
#### Handle non-numeric values
Reduce operations let you control how null, NaN, and infinity values are handled:
- **Strict:** Returns NaN if any non-numeric values exist. Use when data quality is critical.
- **Drop non-numeric:** Filters out problematic values before calculating. Use when occasional bad data points are acceptable.
- **Replace non-numeric:** Replaces bad values with a specified number. Use when you want to substitute a default value.
### Resample
Resample operations align time series to a consistent time interval, enabling you to perform math operations between series with mismatched timestamps.
**Why resample:**
When combining time series from different data sources, their timestamps rarely align perfectly.
One series might report every 15 seconds while another reports every minute.
Resampling normalizes both series to the same interval so you can add, subtract, or compare them.
**Example use case:**
You want to calculate `$errors / $requests` but your error logs report every 10 seconds while your request metrics report every 30 seconds.
Resample both series to 30-second intervals, then perform the division.
**Configuration:**
- **Resample to:** The target interval. Use `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), or `y` (years). Example: `10s`, `1m`, `1h`
- **Downsample:** How to handle multiple data points in one interval. Choose a reduction function like Mean, Max, Min, or Sum. Example: If resampling from 10s to 30s intervals and you have 3 values, Mean averages them.
- **Upsample:** How to fill intervals with no data points:
- **Pad:** Uses the last known value (forward fill)
- **Backfill:** Uses the next known value (backward fill)
- **fillna:** Inserts NaN for missing intervals
## Best practices
Follow these guidelines to build efficient and maintainable expressions.
### Process data in the data source when possible
Perform aggregations, filtering, and complex calculations inside your data source rather than in expressions when you can.
Data sources are optimized for processing their own data, and moving large volumes of data to Grafana for simple operations is inefficient.
**Use expressions for:**
- Operations your data source doesn't support
- Cross-data-source calculations
- Lightweight post-processing
- Alerting logic that needs server-side evaluation
**Avoid expressions for:**
- Simple aggregations your data source can perform
- Processing millions of data points
- Operations that could be handled by recording rules or continuous queries
### Understand backend data source requirements
Expressions only work with backend (server-side) data sources. Browser-based data sources can't be used in expressions.
**Supported:** Prometheus, Loki, InfluxDB, MySQL, PostgreSQL, CloudWatch, and other backend data sources.
**Not supported:** TestData, browser-based plugins, or client-side data sources.
### Use alerting-compatible configurations
Expressions work differently in alerting contexts than in panels:
- Alerting requires expressions to evaluate server-side.
- Most alert conditions need single values (use Reduce operations).
- Test your expressions with the same time ranges your alerts will use.
- Legacy dashboard alerts don't support expressions; use [Grafana Alerting](ref:grafana-alerting) instead.

View File

@@ -1,524 +0,0 @@
---
aliases:
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Expressions examples
title: Expressions examples
description: Practical expression examples from basic to advanced for common monitoring scenarios
weight: 55
refs:
grafana-expressions:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/visualizations/panels-visualizations/query-transform-data/expression-queries/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/visualizations/panels-visualizations/query-transform-data/expression-queries/
grafana-alerting:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
---
# Expressions examples
This document provides practical expression examples for common monitoring and visualization scenarios.
Examples progress from basic to advanced, showing you how to solve real-world problems with Grafana Expressions.
For foundational concepts, refer to [Grafana expressions](ref:grafana-expressions).
## Basic examples
Start here if you're new to expressions. These examples demonstrate fundamental patterns you'll use frequently.
### Convert units
**Scenario:** Your metrics are in bytes, but you want to display them in gigabytes.
**Setup:**
- Query A (Prometheus): `node_memory_MemTotal_bytes`
- Expression B (Math): `$A / 1024 / 1024 / 1024`
**Result:** Memory values converted from bytes to gigabytes.
**Variations:**
- Bytes to megabytes: `$A / 1024 / 1024`
- Bytes to terabytes: `$A / 1024 / 1024 / 1024 / 1024`
- Milliseconds to seconds: `$A / 1000`
- Celsius to Fahrenheit: `$A * 9 / 5 + 32`
---
### Calculate a simple percentage
**Scenario:** Show what percentage of total memory is being used.
**Setup:**
- Query A (Prometheus): `node_memory_MemTotal_bytes`
- Query B (Prometheus): `node_memory_MemAvailable_bytes`
- Expression C (Math): `($A - $B) / $A * 100`
**Result:** Memory usage as a percentage (0-100).
**Tip:** This pattern works for any "used / total * 100" calculation.
---
### Get the current (latest) value
**Scenario:** Display the most recent temperature reading in a stat panel.
**Setup:**
- Query A (InfluxDB): Temperature sensor time series data
- Expression B (Reduce): Input `$A`, Function: **Last**
**Result:** Single number showing the most recent value from the time series.
**When to use:** Stat panels, gauges, or any visualization that needs a single current value.
---
### Calculate an average over time
**Scenario:** Show the average CPU usage over the dashboard time range.
**Setup:**
- Query A (Prometheus): `node_cpu_seconds_total{mode="idle"}`
- Expression B (Reduce): Input `$A`, Function: **Mean**
**Result:** Average CPU value across the selected time range.
**Note:** Each series (each CPU core, each host) produces its own average, preserving labels.
---
### Find maximum or minimum values
**Scenario:** Identify the peak memory usage in the last 24 hours.
**Setup:**
- Query A (Prometheus): `node_memory_MemUsed_bytes` (last 24 hours)
- Expression B (Reduce): Input `$A`, Function: **Max**
**Result:** Peak memory usage value for each host.
**Variations:**
- Use **Min** to find the lowest value
- Use **Count** to see how many data points exist
---
### Simple threshold check
**Scenario:** Create a binary indicator showing whether CPU is above 80%.
**Setup:**
- Query A (Prometheus): `100 - (avg by(instance)(rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100)`
- Expression B (Math): `$A > 80`
**Result:** Returns `1` when CPU exceeds 80%, `0` otherwise. Useful for alerting or status indicators.
---
## Intermediate examples
These examples combine multiple operations and handle more complex scenarios.
### Calculate error rate percentage
**Scenario:** Display HTTP error rate as a percentage of total requests.
**Setup:**
- Query A (Prometheus): `sum(rate(http_requests_total{status=~"5.."}[5m]))`
- Query B (Prometheus): `sum(rate(http_requests_total[5m]))`
- Expression C (Math): `$A / $B * 100`
**Result:** Error rate percentage across all endpoints.
**Handling division by zero:** If there are zero requests, this produces infinity. To handle this:
- Expression C (Math): `$B > 0 ? ($A / $B * 100) : 0`
This returns 0 when there are no requests instead of infinity.
---
### Calculate available disk space
**Scenario:** Show available disk space as a percentage for capacity planning.
**Setup:**
- Query A (Prometheus): `node_filesystem_size_bytes{mountpoint="/"}`
- Query B (Prometheus): `node_filesystem_avail_bytes{mountpoint="/"}`
- Expression C (Math): `$B / $A * 100`
**Result:** Percentage of disk space available (not used) for each host's root filesystem.
**For alerting:** Add an alert when available space drops below 10%:
- Expression D (Math): `$C < 10`
---
### Aggregate across multiple servers
**Scenario:** Calculate total requests per second across all web servers.
**Setup:**
- Query A (Prometheus): `rate(http_requests_total{job="webservers"}[5m])`
- Expression B (Reduce): Input `$A`, Function: **Sum**
**Result:** Total requests per second across all servers combined into a single value.
**Alternative:** To get the average per server instead:
- Expression B (Reduce): Input `$A`, Function: **Mean**
---
### Combine metrics from different data sources
**Scenario:** Calculate efficiency by dividing application throughput (Prometheus) by infrastructure cost metric (CloudWatch).
**Setup:**
- Query A (Prometheus): `sum(rate(processed_jobs_total[5m]))`
- Query B (CloudWatch): EC2 instance cost metric
- Expression C (Resample): Input `$A`, Resample to: `1m`, Downsample: Mean
- Expression D (Resample): Input `$B`, Resample to: `1m`, Downsample: Mean
- Expression E (Math): `$C / $D`
**Result:** Jobs processed per dollar (or cost unit), showing application efficiency.
**Why resample:** Different data sources often have different collection intervals. Resampling ensures timestamps align for math operations.
---
### Compare hosts to fleet average
**Scenario:** Identify hosts performing worse than the fleet average.
**Setup:**
- Query A (Prometheus): `node_cpu_usage_percent` (returns one series per host)
- Expression B (Reduce): Input `$A`, Function: **Mean** (fleet average)
- Expression C (Math): `$A - $B`
**Result:** Each host shows how much above or below the fleet average they are. Positive values indicate above-average CPU usage.
---
### Filter invalid data
**Scenario:** Calculate average response time, ignoring any null or NaN values in the data.
**Setup:**
- Query A (Time series): Response time data with occasional gaps
- Expression B (Reduce): Input `$A`, Function: **Mean**, Mode: **Drop non-numeric**
**Result:** Clean average that ignores invalid data points.
**Alternative modes:**
- **Strict:** Returns NaN if any value is invalid (use when data quality matters)
- **Replace non-numeric:** Substitutes a specific value for invalid data points
---
### Calculate rate of change
**Scenario:** Show how quickly memory usage is increasing or decreasing.
**Setup:**
- Query A (Prometheus): `node_memory_MemUsed_bytes`
- Query B (Prometheus): `node_memory_MemUsed_bytes offset 5m`
- Expression C (Math): `$A - $B`
**Result:** Bytes of memory change over the last 5 minutes. Positive = increasing, negative = decreasing.
**As a percentage change:**
- Expression C (Math): `($A - $B) / $B * 100`
---
## Advanced examples
These examples demonstrate complex multi-step calculations and sophisticated alerting patterns.
### Compare current value to 24-hour average
**Scenario:** Highlight when current traffic is significantly above or below the daily norm.
**Setup:**
- Query A (Prometheus): `sum(rate(http_requests_total[24h]))` (historical average)
- Query B (Prometheus): `sum(rate(http_requests_total[5m]))` (current rate)
- Expression C (Reduce): Input `$A`, Function: **Mean**
- Expression D (Math): `($B - $C) / $C * 100`
**Result:** Percentage difference from the 24-hour average. +50 means 50% above normal, -30 means 30% below normal.
**Use cases:**
- Detect traffic anomalies
- Identify unusual load patterns
- Trigger alerts for significant deviations
---
### Calculate service level indicator (SLI)
**Scenario:** Calculate the percentage of requests meeting your latency target (under 200ms).
**Setup:**
- Query A (Prometheus): `sum(rate(http_request_duration_seconds_bucket{le="0.2"}[5m]))`
- Query B (Prometheus): `sum(rate(http_request_duration_seconds_count[5m]))`
- Expression C (Math): `$A / $B * 100`
**Result:** Percentage of requests completing in under 200ms (your SLI).
**For SLO alerting:** Alert when SLI drops below 99%:
- Expression D (Reduce): Input `$C`, Function: **Mean**
- Expression E (Math): `$D < 99`
---
### Multi-host alerts with reduction
**Scenario:** Alert when average CPU across all production servers exceeds 80%.
**Setup:**
- Query A (Prometheus): `100 - (avg by(instance)(rate(node_cpu_seconds_total{mode="idle",env="production"}[5m])) * 100)`
- Expression B (Reduce): Input `$A`, Function: **Mean** (average across all hosts)
- Expression C (Math): `$B > 80`
**Result:** Single alert that fires when the fleet average crosses the threshold, not individual host alerts.
**Alternative - alert on any host:**
- Expression B (Reduce): Input `$A`, Function: **Max**
This alerts when any single host exceeds 80%.
---
### Calculate compound metrics
**Scenario:** Calculate Apdex score (Application Performance Index) from response time buckets.
**Setup:**
- Query A (Prometheus): `sum(rate(http_request_duration_seconds_bucket{le="0.5"}[5m]))` (satisfied: <500ms)
- Query B (Prometheus): `sum(rate(http_request_duration_seconds_bucket{le="2.0"}[5m]))` (tolerating: <2s)
- Query C (Prometheus): `sum(rate(http_request_duration_seconds_count[5m]))` (total)
- Expression D (Math): `($A + ($B - $A) / 2) / $C`
**Result:** Apdex score from 0 to 1, where 1 is perfect user satisfaction.
**Formula explained:** Apdex = (Satisfied + Tolerating/2) / Total
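To make the formula concrete, here is a worked example with illustrative numbers (not from any real system); note that Prometheus histogram buckets are cumulative, so `$B` already includes `$A`:
```
$A (satisfied, <500ms)  =  90 req/s
$B (tolerating, <2s)    =  96 req/s
$C (total)              = 100 req/s

Apdex = ($A + ($B - $A) / 2) / $C
      = (90 + (96 - 90) / 2) / 100
      = (90 + 3) / 100
      = 0.93
```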
---
### Detect sustained conditions
**Scenario:** Alert only when CPU has been high for at least 5 minutes, not just a brief spike.
**Setup:**
- Query A (Prometheus): `avg_over_time(node_cpu_usage_percent[5m])`
- Expression B (Reduce): Input `$A`, Function: **Mean**
- Expression C (Math): `$B > 80`
**Result:** Alerts only fire when the 5-minute average exceeds the threshold, filtering out brief spikes.
**Alternative approach using count:**
- Query A: `node_cpu_usage_percent`
- Expression B (Math): `$A > 80`
- Expression C (Reduce): Input `$B`, Function: **Sum** (counts "1" values where condition is true)
- Expression D (Math): `$C > 5`
This alerts when more than 5 data points in the range exceed the threshold.
---
### Correlate metrics across systems
**Scenario:** Calculate orders processed per database query to measure backend efficiency.
**Setup:**
- Query A (Prometheus - App metrics): `sum(rate(orders_processed_total[5m]))`
- Query B (MySQL data source): Database queries per second from performance schema
- Expression C (Resample): Input `$A`, Resample to: `30s`, Downsample: Mean
- Expression D (Resample): Input `$B`, Resample to: `30s`, Downsample: Mean
- Expression E (Math): `$C / $D`
**Result:** Orders per database query, showing how efficiently your backend processes orders.
**Higher is better:** More orders per database query means fewer queries per order and more efficient database usage.
---
### Ratio-based alerts with baseline
**Scenario:** Alert when error ratio increases by more than 2x compared to yesterday's baseline.
**Setup:**
- Query A (Prometheus): `sum(rate(http_errors_total[5m]))` (current errors)
- Query B (Prometheus): `sum(rate(http_requests_total[5m]))` (current requests)
- Query C (Prometheus): `sum(rate(http_errors_total[5m] offset 24h))` (yesterday's errors)
- Query D (Prometheus): `sum(rate(http_requests_total[5m] offset 24h))` (yesterday's requests)
- Expression E (Math): `$A / $B` (current error rate)
- Expression F (Math): `$C / $D` (baseline error rate)
- Expression G (Reduce): Input `$E`, Function: **Mean**
- Expression H (Reduce): Input `$F`, Function: **Mean**
- Expression I (Math): `$G / $H > 2`
**Result:** Alerts when today's error rate is more than double yesterday's rate.
**Why this matters:** Absolute thresholds don't account for normal variation. Ratio-based alerting adapts to your system's baseline behavior.
---
### Calculate percentile-based thresholds
**Scenario:** Alert when response time exceeds the 95th percentile baseline.
**Setup:**
- Query A (Prometheus): `histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))`
- Query B (Prometheus): `histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket[1h])) by (le))`
- Expression C (Reduce): Input `$A`, Function: **Last** (current p95)
- Expression D (Reduce): Input `$B`, Function: **Mean** (baseline p95)
- Expression E (Math): `$C > $D * 1.5`
**Result:** Alerts when current p95 latency exceeds 1.5x the hourly baseline.
---
### Weighted scores across metrics
**Scenario:** Create a composite health score from multiple metrics (CPU, memory, disk, network).
**Setup:**
- Query A: CPU usage percentage (0-100)
- Query B: Memory usage percentage (0-100)
- Query C: Disk usage percentage (0-100)
- Query D: Network saturation percentage (0-100)
- Expression E (Reduce): Input `$A`, Function: **Mean**
- Expression F (Reduce): Input `$B`, Function: **Mean**
- Expression G (Reduce): Input `$C`, Function: **Mean**
- Expression H (Reduce): Input `$D`, Function: **Mean**
- Expression I (Math): `($E * 0.3) + ($F * 0.25) + ($G * 0.25) + ($H * 0.2)`
**Result:** Weighted health score from 0-100 where lower is healthier. Weights reflect relative importance (CPU 30%, Memory 25%, Disk 25%, Network 20%).
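As a sanity check, here is a worked example with illustrative values (not from any real fleet):
```
$E (CPU)     = 50  →  50 * 0.30 = 15.0
$F (Memory)  = 60  →  60 * 0.25 = 15.0
$G (Disk)    = 40  →  40 * 0.25 = 10.0
$H (Network) = 30  →  30 * 0.20 =  6.0

$I = 15.0 + 15.0 + 10.0 + 6.0 = 46.0   (a relatively healthy fleet)
```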
**For alerting:**
- Expression J (Math): `$I > 70`
Alert when composite score indicates degraded health.
---
### Conditional logic with fallbacks
**Scenario:** Show error rate, but display 0 instead of infinity when there are no requests.
**Setup:**
- Query A (Prometheus): `sum(rate(http_errors_total[5m]))`
- Query B (Prometheus): `sum(rate(http_requests_total[5m]))`
- Expression C (Math): `$B > 0 ? ($A / $B * 100) : 0`
**Result:** Error rate percentage that safely handles zero-request periods.
**Conditional syntax:** `condition ? value_if_true : value_if_false`
**More examples:**
- Cap values at 100: `$A > 100 ? 100 : $A`
- Convert negative to zero: `$A < 0 ? 0 : $A`
- Binary classification: `$A > threshold ? 1 : 0`
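Conditionals can also be nested. Assuming nested ternaries parse the same way as the single-level examples above, a sketch that clamps a value to the 0-100 range:
```
$A < 0 ? 0 : ($A > 100 ? 100 : $A)
```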
---
### Time-window comparison for trend detection
**Scenario:** Detect if metrics are trending up or down by comparing recent data to slightly older data.
**Setup:**
- Query A (Prometheus): `avg_over_time(http_requests_total[5m])`
- Query B (Prometheus): `avg_over_time(http_requests_total[5m] offset 10m)`
- Expression C (Reduce): Input `$A`, Function: **Mean**
- Expression D (Reduce): Input `$B`, Function: **Mean**
- Expression E (Math): `($C - $D) / $D * 100`
**Result:** Percentage change in requests between the last 5 minutes and the previous 5-minute window.
**Interpretation:**
- Positive values: Traffic increasing
- Negative values: Traffic decreasing
- Values near 0: Traffic stable
**Use case:** Detect rapid traffic changes that might indicate problems or attacks.
---
## Tips for expression development
Follow these best practices to build reliable, maintainable expressions in your visualizations and alerts.
### Start simple and iterate
Begin with basic operations and verify each step works before adding complexity. Use the Query Inspector to see intermediate results.
### Name your queries clearly
While RefIDs default to letters, you can use descriptive names. Referencing `${errors}` and `${total_requests}` is clearer than `$A` and `$B`.
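For example, the error-rate setup from earlier reads more clearly with renamed queries (the names here are illustrative):
```
Query errors         (Prometheus): sum(rate(http_errors_total[5m]))
Query total_requests (Prometheus): sum(rate(http_requests_total[5m]))
Expression error_pct (Math):       ${errors} / ${total_requests} * 100
```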
### Test with realistic time ranges
Expressions may behave differently with various time ranges. Test with the same ranges you'll use in production dashboards or alerts.
### Handle edge cases
Consider what happens when:
- Data is missing (NoData)
- Values are zero (division by zero)
- Metrics haven't been collected yet
- Time series have different numbers of points
### Document complex expressions
Add panel descriptions or annotation text explaining what complex expressions calculate and why.
### Monitor expression performance
If dashboards become slow, check if expressions are processing too much data. Consider moving heavy aggregations to recording rules or data source queries.

View File

@@ -0,0 +1,263 @@
---
aliases:
- ../../../panels-visualizations/query-transform-data/ # /docs/grafana/next/panels-visualizations/query-transform-data/
- ../../../panels-visualizations/query-transform-data/expression-queries/ # /docs/grafana/next/panels-visualizations/query-transform-data/expression-queries/
- ../../../panels/query-a-data-source/use-expressions-to-manipulate-data/ # /docs/grafana/next/panels/query-a-data-source/use-expressions-to-manipulate-data/
- ../../../panels/query-a-data-source/use-expressions-to-manipulate-data/about-expressions/ # /docs/grafana/next/panels/query-a-data-source/use-expressions-to-manipulate-data/about-expressions/
- ../../../panels/query-a-data-source/use-expressions-to-manipulate-data/write-an-expression/ # /docs/grafana/next/panels/query-a-data-source/use-expressions-to-manipulate-data/write-an-expression/
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Write expression queries
title: Write expression queries
description: Write server-side expressions to manipulate data using math and other operations
weight: 40
refs:
no-data-and-error-handling:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/#configure-no-data-and-error-handling
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/#configure-no-data-and-error-handling
multiple-dimensional-data:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/timeseries-dimensions/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/timeseries-dimensions/
grafana-alerting:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
labels:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/timeseries-dimensions/#labels
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/timeseries-dimensions/#labels
---
# Write expression queries
Server-side expressions enable you to manipulate data returned from queries with math and other operations. Expressions create new data and do not manipulate the data returned by data sources.
## About expressions
Server-side expressions allow you to manipulate data returned from queries with math and other operations. Expressions create new data and do not manipulate the data returned by data sources, aside from some minor data restructuring to make the data acceptable input for expressions.
### Using expressions
Expressions are most commonly used for [Grafana Alerting](ref:grafana-alerting). The processing is done server-side, so expressions can operate without a browser session. However, expressions can also be used with backend data sources and visualization.
{{< admonition type="note" >}}
Expressions do not work with legacy dashboard alerts.
{{< /admonition >}}
Expressions are meant to augment data sources by enabling queries from different data sources to be combined or by providing operations unavailable in a data source.
{{< admonition type="note" >}}
When possible, you should do data processing inside the data source. Copying data from storage to the Grafana server for processing is inefficient, so expressions are targeted at lightweight data processing.
{{< /admonition >}}
Expressions work with data source queries that return time series or number data. They also operate on [multiple-dimensional data](ref:multiple-dimensional-data), for example, a query that returns multiple series, where each series is identified by labels or tags.
An individual expression takes one or more queries or other expressions as input and adds data to the result. Each individual expression or query is represented by a variable, a named identifier known as its RefID (e.g., the default letter `A` or `B`).
To reference the output of an individual expression or a data source query in another expression, this identifier is used as a variable.
### Types of expressions
Expressions work with two types of data.
- A collection of time series.
- A collection of numbers, where each number is an item.
Each collection is returned from a single data source query or expression and represented by the RefID. Each collection is a set, where each item in the set is uniquely identified by its dimensions, which are stored as [labels](ref:labels) or key-value pairs.
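For example, a single RefID might hold a collection like the following sketch (labels and values are illustrative):
```
$A (collection of numbers):
  {host=web01} 3
  {host=web02} 5

$B (collection of time series):
  {host=web01} [t1: 1, t2: 2, t3: 3]
  {host=web02} [t1: 4, t2: 6, t3: 8]
```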
### Data source queries
Server-side expressions only support data source queries for backend data sources. The data is generally assumed to be labeled time series data. In the future, we intend to add an assertion of the query return type (number or time series) so that expressions can handle errors better.
Data source queries, when used with expressions, are executed by the expression engine. When it does this, it restructures the data to be either one time series or one number per data frame. So, for example, if you use a data source that returns multiple series on one frame in the table view, you might notice it looks different when executed with expressions.
Currently, the only supported non-time series format is a number. Numbers are produced when a table response returns a data frame with no time column, one or more string columns, and one number column:
| Loc | Host | Avg_CPU |
| --- | ---- | ------- |
| MIA | A | 1 |
| NYC | B | 2 |
The example above produces numbers that work with expressions. The string columns become labels, and the number column supplies the corresponding value, for example `{"Loc": "MIA", "Host": "A"}` with a value of 1.
### Operations
You can use the following operations in expressions: math, reduce, and resample.
#### Math
Math is for free-form math formulas on time series or number data. Math operations take numbers and time series as input and change them to different numbers and time series.
Data from other queries or expressions are referenced with the RefID prefixed with a dollar sign, for example `$A`. If the variable has spaces in the name, then you can use a brace syntax like `${my variable}`.
Numeric constants may be in decimal (`2.24`), octal (with a leading zero like `072`), or hex (with a leading 0x like `0x2A`). Exponentials and signs are also supported (e.g., `-0.8e-2`).
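A few sketches of valid Math expressions using these forms (the parenthetical annotations are explanatory, not part of the expression; RefIDs are illustrative):
```
$A + 2.24             (decimal constant)
$A % 072              (octal constant, decimal 58)
$A * 0x2A             (hex constant, decimal 42)
$A * -0.8e-2          (signed exponential constant)
${my variable} / $B   (brace syntax for a RefID containing a space)
```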
##### Operators
The arithmetic (`+`, binary and unary `-`, `*`, `/`, `%`, exponent `**`), relational (`<`, `>`, `==`, `!=`, `>=`, `<=`), and logical (`&&`, `||`, and unary `!`) operators are supported.
How an operation behaves depends on whether the data is a number or time series data.
With binary operations, such as `$A + $B` or `$A || $B`, the operator is applied in the following ways depending on the type of data:
- If both `$A` and `$B` are a number, then the operation is performed between the two numbers.
- If one variable is a number and the other is a time series, then the operation is performed between the number and the value of each point in the time series.
- If both `$A` and `$B` are time series data, then the operation is performed between the values at each timestamp that exists in both `$A` and `$B`. The Resample operation can be used to line up timestamps. (**Note:** in the future, we plan to add options to the Math operation for different behaviors.)
Summary:
- Number OP number = number
- Number OP series = series
- Series OP series = series
Because expressions work with multiple series or numbers represented by a single variable, binary operations also perform a union (join) between the two variables. This is done based on the identifying labels associated with each individual series or number.
So if `$A` contains a number with labels like `{host=web01}` and `$B` contains another number with the same labels, then the operation is performed between those two items, and the result shares the same labels. The rules for the behavior of this union are as follows:
- An item with no labels will join to anything.
- If both `$A` and `$B` each contain only one item (one series, or one number), they will join.
- If the labels are an exact match, they will join.
- If the labels of one item are a subset of the other's, for example, an item in `$A` labeled `{host=A,dc=MIA}` and an item in `$B` labeled `{host=A}`, they will join.
- Currently, if within a variable such as `$A` there are different tag _keys_ for each item, the join behavior is undefined.
The relational and logical operators return 0 for false and 1 for true.
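A sketch of how these rules play out (labels and values are illustrative):
```
$A: {host=web01} 10,  {host=web02} 20
$B: {host=web01} 2

$A * $B  →  {host=web01} 20                    (exact label match joins; web02 has no partner)
$A > 15  →  {host=web01} 0,  {host=web02} 1    (relational operators return 0 or 1 per item)
```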
##### Math Functions
While most functions exist as their own expression operations, the Math operation also has some functions similar to math operators or symbols. When a function can take either numbers or series, the same type as the argument is returned. When it is a series, the operation is performed on the value of each point in the series.
###### abs
abs returns the absolute value of its argument, which can be a number or a series. For example `abs(-1)` or `abs($A)`.
###### is_inf
is_inf takes a number or a series and returns `1` for `Inf` values (negative or positive) and `0` for other values. For example `is_inf($A)`.
{{< admonition type="note" >}}
If you need to check specifically for negative infinity, for example, you can do a comparison like `$A == infn()`.
{{< /admonition >}}
###### is_nan
is_nan takes a number or a series and returns `1` for `NaN` values and `0` for other values. For example `is_nan($A)`. This function exists because `NaN` is not equal to `NaN`.
###### is_null
is_null takes a number or a series and returns `1` for `null` values and `0` for other values. For example `is_null($A)`.
###### is_number
is_number takes a number or a series and returns `1` for all real number values and `0` for other values (which are `null`, `Inf+`, `Inf-`, and `NaN`). For example `is_number($A)`.
###### log
Log returns the natural logarithm of its argument, which can be a number or a series. If the value is less than 0, NaN is returned. For example `log(-1)` or `log($A)`.
###### inf, infn, nan, and null
The inf, infn, nan, and null functions all return a single value of the name. They primarily exist for testing. Example: `null()`.
###### round
Round returns a rounded integer value. For example, `round(3.123)` or `round($A)`. (This function should probably take an argument so it can add precision to the rounded value).
###### ceil
Ceil rounds the number up to the nearest integer value. For example, `ceil(3.123)` returns 4.
###### floor
Floor rounds the number down to the nearest integer value. For example, `floor(3.123)` returns 3.
#### Reduce
Reduce takes one or more time series returned from a query or an expression and turns each series into a single number. The labels of the time series are kept as labels on each output number.
**Fields:**
- **Function -** The reduction function to use
- **Input -** The variable (refID, such as `A`) to reduce
- **Mode -** Controls the behavior of the reduction function when a series contains non-numeric values (null, NaN, +\-Inf)
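A sketch of what Reduce does to a pair of series (values are illustrative); note that the labels survive the reduction:
```
Input $A:
  {host=web01} [1, 2, 3]
  {host=web02} [4, 6, 8]

Reduce(Mean, $A)  →  {host=web01} 2,  {host=web02} 6
```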
##### Reduction Functions
###### Count
Count returns the number of points in each series.
###### Mean
Mean returns the total of all values in each series divided by the number of points in that series. In `strict` mode, if any values in the series are null or NaN, or if the series is empty, NaN is returned.
###### Min and Max
Min and Max return the smallest or largest value in the series respectively. In `strict` mode, if any values in the series are null or NaN, or if the series is empty, NaN is returned.
###### Sum
Sum returns the total of all values in the series. If the series is of zero length, the sum is 0. In `strict` mode, if there are any NaN or null values in the series, NaN is returned.
###### Last
Last returns the last number in the series. If the series has no values, NaN is returned.
##### Reduction Modes
###### Strict
In Strict mode the input series is processed as is. If any values in the series are non-numeric (null, NaN or +\-Inf), NaN is returned.
###### Drop Non-Numeric
In this mode all non-numeric values (null, NaN or +\-Inf) in the input series are filtered out before executing the reduction function.
###### Replace Non-Numeric
In this mode all non-numeric values are replaced by a pre-defined value.
#### Resample
Resample changes the timestamps in each time series to have a consistent time interval. The main use case is to align time series that do not share the same timestamps so that math can be performed between them. To do this, resample each of the two series, and then reference the resampled variables in a Math operation.
**Fields:**
- **Input -** The variable of time series data (refID, such as `A`) to resample
- **Resample to -** The duration of time to resample to, for example `10s`. Units may be `s` for seconds, `m` for minutes, `h` for hours, `d` for days, `w` for weeks, and `y` for years.
- **Downsample -** The reduction function to use when there is more than one data point per window sample. See the reduction operation for behavior details.
- **Upsample -** The method to use to fill a window sample that has no data points.
  - **pad** fills with the last known value
  - **backfill** fills with the next known value
  - **fillna** fills empty sample windows with NaN values
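A sketch of the typical pattern: resample two series with mismatched timestamps to a common interval, then combine them in a Math operation (intervals are illustrative):
```
Query A: series reported every 10s
Query B: series reported every 15s

Expression C (Resample): Input $A, Resample to 1m, Downsample: Mean
Expression D (Resample): Input $B, Resample to 1m, Downsample: Mean
Expression E (Math):     $C + $D   (timestamps now align on 1m boundaries)
```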
## Write an expression
If your data source supports them, then Grafana displays the **Expression** button and shows any existing expressions in the query editor list.
For more information about expressions, refer to [About expressions](#about-expressions).
1. Open the panel.
1. Below the query, click **Expression**.
1. In the **Operation** field, select the type of expression you want to write.
For more information about expression operations, refer to [About expressions](#about-expressions).
1. Write the expression.
1. Click **Apply**.
## Special cases
When any queried data source returns no series or numbers, the expression engine returns `NoData`. For example, if a request contains two data source queries that are merged by an expression and at least one of them returns `NoData`, then the result for the entire request is `NoData`.
For more information about how [Grafana Alerting](ref:grafana-alerting) processes `NoData` results, refer to [No data and error handling](ref:no-data-and-error-handling).
In the case of using an expression on multiple queries, the expression engine requires that all of the queries return an identical timestamp. For example, if using math to combine the results of multiple SQL queries which each use `SELECT NOW() AS "time"`, the expression only works if all queries evaluate `NOW()` to an identical timestamp, which does not always happen. To resolve this, you can replace `NOW()` with an arbitrary time, such as `SELECT 1 AS "time"`, or any other valid UNIX timestamp.

View File

@@ -1,507 +0,0 @@
---
aliases:
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Troubleshoot expressions
title: Troubleshoot Grafana expressions
description: Debug and resolve common issues when working with Grafana Expressions
weight: 50
refs:
grafana-expressions:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/visualizations/panels-visualizations/query-transform-data/expression-queries/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/visualizations/panels-visualizations/query-transform-data/expression-queries/
transformations:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
---
# Troubleshoot Grafana expressions
This guide helps you diagnose and resolve common issues when working with expressions.
## Debug expressions
When an expression doesn't produce the expected results, use these strategies to identify the problem.
### Test expressions step by step
Break complex expressions into smaller pieces and verify each step:
1. **Test individual queries first:** Ensure each data source query returns the expected data before adding expressions.
1. **Add expressions incrementally:** Start with a simple expression and gradually add complexity.
1. **Use separate panels for testing:** Create a temporary panel to test expressions in isolation.
1. **Check intermediate results:** Add expressions at each step of your calculation to see intermediate values.
**Example:**
Instead of creating `($A - $B) / $C * 100` immediately, build it incrementally:
- Expression D: `$A - $B` (verify the subtraction works)
- Expression E: `$D / $C` (verify the division works)
- Expression F: `$E * 100` (final percentage)
Once working, you can collapse them into a single expression if desired.
### Verify RefID references
Ensure you're referencing the correct queries and expressions:
- RefIDs are case-sensitive: `$A` is different from `$a`
- Check that RefIDs haven't changed after reordering queries
- Use `${RefID}` syntax for RefIDs with spaces or special characters
### Check data types
Expressions expect specific data types. Verify your queries return time series or numbers, not tables or other formats.
**Common issues:**
- SQL queries returning multiple columns (expressions need one value column)
- Queries returning string data instead of numbers
- Empty result sets that appear as NoData
### Inspect labels
Use the Table view in panels to see the labels on your series and verify they match as expected.
**What to check:**
- Do series from different queries have compatible labels for joining?
- Are label names spelled consistently across queries?
- Are there unexpected extra labels preventing matches?
## Common errors and solutions
Following are common errors and how to troubleshoot them.
### "NoData" result
**Problem:** Your expression returns NoData even though some queries have data.
**Causes and solutions:**
- **One query returns no data:** If any query in an expression returns NoData, the entire expression returns NoData. Check that all queries have data for the selected time range.
- **Mismatched time ranges:** Ensure all queries use compatible time ranges. A query with "Last 5 minutes" can't combine with a query using "Last 24 hours" without adjustments.
- **Backend data source required:** Expressions only work with backend data sources. Check that you're not using browser-based data sources.
**Solution:**
Test each query independently to identify which one returns NoData, then investigate why that query has no data.
### No series match in math operations
**Problem:** Math expression like `$A + $B` returns no data, but both queries return data.
**Causes and solutions:**
- **Label mismatch:** Series from `$A` and `$B` have different labels that prevent automatic matching.
**Example:** `$A` has `{host="web01", region="us-east"}` but `$B` has `{server="web01", region="us-east"}`. The different label names (`host` vs `server`) prevent matching.
**Solution:** Modify your queries to use consistent label names, or ensure one set of series has no labels (which matches anything).
- **No overlapping timestamps:** Time series need matching timestamps for math operations.
**Solution:** Use the Resample operation to align timestamps to a common interval.
### Timestamp mismatch errors
**Problem:** Combining results from multiple SQL queries fails because timestamps don't align.
**Example:**
```sql
-- Query A
SELECT NOW() AS "time", COUNT(*) as "errors" FROM error_log;
-- Query B
SELECT NOW() AS "time", COUNT(*) as "requests" FROM request_log;
```
These queries may execute at slightly different times, producing different timestamps.
**Solution 1 - Use fixed timestamps:**
```sql
-- Query A
SELECT 1 AS "time", COUNT(*) as "errors" FROM error_log;
-- Query B
SELECT 1 AS "time", COUNT(*) as "requests" FROM request_log;
```
**Solution 2 - Use consistent time references:**
Ensure all queries evaluate time identically by using the same timestamp variable or function.
**Solution 3 - Use Resample:**
Add Resample operations to align both series to a common interval before performing math.
### Math operations produce unexpected nulls or NaN
**Problem:** Expression results contain null or NaN values unexpectedly.
**Causes and solutions:**
- **Division by zero:** Dividing by zero produces infinity. Use conditional logic: `$A > 0 ? $B / $A : 0`
- **Logarithm of negative numbers:** `log()` of negative values returns NaN.
- **Operations on null values:** Math operations involving null typically produce null.
**Solution:**
Use data quality functions to filter or handle problematic values:
```
is_number($A) ? $A : 0
```
Or use Reduce with "Drop non-numeric" mode to clean data before calculations.
### Reduce returns NaN in strict mode
**Problem:** Reduce operation returns NaN even though most data points are valid.
**Cause:** Strict mode returns NaN if _any_ value in the series is null, NaN, or infinity.
**Solution:**
Change the reduction mode:
- **Drop non-numeric:** Ignores invalid values and calculates from valid ones
- **Replace non-numeric:** Substitutes a specific value for invalid data points
Use Strict mode only when data quality is critical and you want to know if any values are invalid.
### Expression works in panel but fails in alerting
**Problem:** Expression displays correctly in a panel but produces errors or unexpected results in alert rules.
**Causes and solutions:**
- **Time range differences:** Alerts use specific time ranges that may differ from your panel's time range. Verify the alert's time range settings.
- **Data availability:** Data may be available when viewing the panel but missing when the alert evaluates.
- **Reduce required for alerting:** Most alert conditions need single values. Ensure you're using Reduce to convert time series to numbers for threshold comparisons.
**Solution:**
Test your expression in a panel using the same time range as your alert rule.
## Work with timestamps
Timestamps can be a common source of issues when working with expressions. Here's how to handle them effectively.
### Understand timestamp alignment
Math operations between time series require matching timestamps. If series `$A` has points at `10:00:00`, `10:00:30`, `10:01:00` and series `$B` has points at `10:00:15`, `10:00:45`, `10:01:15`, the operation `$A + $B` produces no results because no timestamps match exactly.
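A sketch of the mismatch (timestamps and values are illustrative):
```
$A: 10:00:00 → 5,  10:00:30 → 6,  10:01:00 → 7
$B: 10:00:15 → 2,  10:00:45 → 3,  10:01:15 → 4

$A + $B  →  (empty: no timestamp exists in both series)
```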
### When to resample
Use Resample when:
- Combining data from sources with different collection intervals
- One data source reports irregularly while another reports at fixed intervals
- You need to ensure timestamps align for math operations
- You want to normalize data to a consistent interval for visualization
### Resample strategies
**Downsample (reducing frequency):**
When going from higher to lower frequency (for example, 10s intervals to 1m intervals), choose an appropriate reduction function:
- **Mean:** For averaging values (CPU percentage, temperature)
- **Max:** For peak values (maximum memory usage)
- **Min:** For minimum values (lowest throughput)
- **Sum:** For accumulating values (request counts, error totals)
**Upsample (increasing frequency):**
When going from lower to higher frequency (for example, 1m intervals to 10s intervals), choose a fill strategy:
- **Pad (forward fill):** Assumes value stays constant until next measurement (good for state data)
- **Backfill:** Uses next known value (less common, use when future values inform past state)
- **fillna:** Inserts NaN for unknown intervals (explicit about missing data)
### SQL timestamp best practices
When writing SQL queries for use with expressions:
**Do:**
- Use consistent timestamp columns across queries
- Round or truncate timestamps to a common interval if needed
- Use fixed timestamps for non-time-based aggregations
```sql
-- Good: Consistent time bucket
SELECT
DATE_TRUNC('minute', timestamp) AS "time",
COUNT(*) as "value"
FROM events
GROUP BY 1
ORDER BY 1;
```
**Don't:**
- Use `NOW()` or `CURRENT_TIMESTAMP` which vary between query executions
- Mix different timestamp columns in related queries
- Return data without a time column for time series expressions
## Handle missing data
Understanding how expressions handle missing data helps you build robust dashboards and alerts.
### NoData propagation
When any query in an expression returns NoData, the entire expression result is NoData. This is by design to prevent calculations on incomplete data.
**Example:**
```
Expression: $A / $B
- Query A returns: 100
- Query B returns: NoData
- Expression result: NoData (not 100, not error)
```
### Strategies for missing data
**1. Use default values:**
Modify your data source queries to return zero or a default value instead of no data.
**2. Build conditional logic:**
Use multiple expressions to check for data availability before performing calculations (see the sketch after this list).
**3. Adjust time ranges:**
Ensure queries use time ranges likely to have data. If a service only reports every 5 minutes, don't query the last 1 minute.
**4. Configure alert NoData handling:**
In alerting, you can configure how NoData is treated (for example, trigger alert, don't trigger, or mark as special state).
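For strategy 2, a sketch using the data quality functions described in the expressions documentation (this guards against null or NaN points within a series; it cannot recover from a full NoData result, which propagates as described above):
```
Query A:             metric that occasionally contains null or NaN points
Expression B (Math): is_number($A) ? $A : 0   (substitute 0 for invalid points)
Expression C (Math): $B * 100                 (safe to use in further math)
```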
### Missing data points vs NoData
**Missing data points:** Some points in a time series are null or absent, but the series exists.
- Handle with Reduce modes (Drop non-numeric, Replace non-numeric)
- Use data quality functions: `is_null($A)`, `is_number($A)`
**NoData:** No series returned at all from a query.
- Check query syntax and time range
- Verify data exists in the data source
- Ensure data source is reachable
## Performance considerations
Expressions run on the Grafana server, so understanding performance implications helps you build efficient dashboards and alerts.
### When expressions are inefficient
**Large data volumes:**
- Pulling millions of data points to Grafana for simple aggregations
- Better: Perform aggregation in the data source query
**Repeated operations:**
- Running the same calculation across many panels
- Better: Consider recording rules (Prometheus) or continuous queries (InfluxDB)
**Complex nested expressions:**
- Long chains of expressions that could be simplified
- Better: Simplify the expression or move logic to data source
### Optimization strategies
**1. Push processing to data sources:**
Instead of:
```
Query A: SELECT value FROM metrics
Expression B: Reduce(Mean, $A)
Expression C: $B > 100
```
Do in data source:
```
Query A: SELECT AVG(value) FROM metrics
Expression B: $A > 100
```
**2. Use appropriate time ranges:**
- Don't query years of data when hours suffice
- Match time ranges to your actual analysis needs
- Use relative time ranges for consistent performance
**3. Reduce data points before math:**
If you only need a single value for alerting, reduce first then perform math rather than calculating across every point:
**Less efficient:**
```
Expression A: $QueryA * 100 (multiplies every point)
Expression B: Reduce(Mean, $A)
```
**More efficient:**
```
Expression A: Reduce(Mean, $QueryA)
Expression B: $A * 100 (multiplies one value)
```
**4. Limit label cardinality:**
High-cardinality labels (many unique values) multiply the number of series. If querying metrics with thousands of unique host labels, consider aggregating in the data source.
### Monitor expression performance
Watch for these warning signs:
- Panels take more than 2-3 seconds to load
- Query inspector shows expressions processing thousands of series
- Grafana server CPU spikes when loading dashboards
- Alert evaluation takes significant time
If you see these issues, review your expressions for optimization opportunities.
## Expressions vs transformations
Both expressions and transformations manipulate query data, but they serve different purposes and have different capabilities.
### When to use expressions
Use expressions when:
- **Server-side processing required:** Alerting requires server-side evaluation
- **Cross-data-source operations:** Combining data from different data sources
- **Label-based matching:** Automatic series matching based on labels
- **Simple math and aggregations:** Basic calculations and reductions
- **Backend data sources:** Working with backend/server-side data sources
**Advantages:**
- Work in alerting rules
- Operate on data before visualization
- Support cross-data-source calculations
- Preserve label-based series relationships
**Limitations:**
- Only work with backend data sources
- Limited operation types (Math, Reduce, Resample)
- Less flexible than transformations for complex data reshaping
- Can't modify table structures significantly
### When to use transformations
Use transformations when:
- **Complex data reshaping:** Pivoting, merging, or restructuring data
- **Table operations:** Working with tabular data formats
- **Field manipulation:** Renaming, organizing, or filtering fields
- **Client-side only needed:** Visualization changes that don't affect alerting
- **Advanced processing:** Operations not available in expressions
**Advantages:**
- More operation types available
- Better for complex table manipulations
- Work with any data source (including browser-based)
- More flexible field and column operations
- Can dramatically reshape data structures
**Limitations:**
- Don't work in alerting (client-side only)
- Can't combine different data sources
- Process data after query execution
- Don't preserve complex label relationships
### Comparison table
| Feature | Expressions | Transformations |
|---------|------------|-----------------|
| Works in alerts | Yes | No |
| Combines data sources | Yes | No |
| Available operations | 3 types (Math, Reduce, Resample) | 20+ types |
| Execution | Server-side | Client-side (browser) |
| Data source support | Backend only | All data sources |
| Label matching | Automatic | Manual |
| Table operations | Limited | Extensive |
| Performance | Uses server resources | Uses browser resources |
### Use both together
You can use expressions and transformations in the same panel:
1. Expressions run first (server-side)
1. Transformations run after (client-side)
**Example workflow:**
- Query A: Prometheus metric
- Query B: SQL query
- Expression C: Combine `$A` and `$B` (server-side)
- Transformation: Rename fields, organize columns (client-side)
This approach lets you leverage the strengths of both systems.
### Migration considerations
**From transformations to expressions:**
Consider this when:
- You need the same logic in alerting
- You're combining data sources
- Server-side processing would improve performance
**Limitations:**
- May need to redesign complex transformations
- Some transformation operations have no expression equivalent
- Need backend data sources
**From expressions to transformations:**
Consider this when:
- You need more complex data manipulation
- You're working with browser-based data sources
- You need advanced table operations
**Limitations:**
- Can't use in alerting
- Can't combine different data sources
- May need to change query structure
## Get help
If you're still experiencing issues after trying these troubleshooting steps:
1. **Check the Query Inspector:** Click the Query Inspector button to see raw query results and expression outputs
1. **Review Grafana logs:** Server-side expression errors appear in Grafana server logs
1. **Simplify and isolate:** Create a minimal example that reproduces the issue
1. **Community resources:** Search or post in the Grafana community forums
1. **Documentation:** Refer to [Grafana Expressions](ref:grafana-expressions) for detailed operation documentation
When asking for help, include:
- Grafana version
- Data source type and version
- Simplified example of your queries and expressions
- Expected vs actual results
- Any error messages from Query Inspector or logs

View File

@@ -1372,11 +1372,6 @@
"count": 2
}
},
"public/app/features/alerting/unified/components/AlertLabelDropdown.tsx": {
"no-restricted-syntax": {
"count": 1
}
},
"public/app/features/alerting/unified/components/AnnotationDetailsField.tsx": {
"@typescript-eslint/consistent-type-assertions": {
"count": 1
@@ -1593,11 +1588,6 @@
"count": 3
}
},
"public/app/features/alerting/unified/components/rule-editor/labels/LabelsField.tsx": {
"no-restricted-syntax": {
"count": 4
}
},
"public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/CloudDataSourceSelector.tsx": {
"no-restricted-syntax": {
"count": 1

View File

@@ -715,11 +715,9 @@ export {
export {
type VisualizationSuggestion,
type VisualizationSuggestionsSupplier,
type VisualizationSuggestionsSupplierFn,
type PanelPluginVisualizationSuggestion,
type VisualizationSuggestionsBuilder,
VisualizationSuggestionScore,
VisualizationSuggestionsBuilder,
VisualizationSuggestionsListAppender,
} from './types/suggestions';
export {
type MatcherConfig,

View File

@@ -1,14 +1,18 @@
import { createDataFrame } from '../dataframe/processDataFrame';
import { identityOverrideProcessor } from '../field/overrides/processors';
import {
StandardEditorsRegistryItem,
standardEditorsRegistry,
standardFieldConfigEditorRegistry,
} from '../field/standardFieldConfigEditorRegistry';
import { FieldType } from '../types/dataFrame';
import { FieldConfigProperty, FieldConfigPropertyItem } from '../types/fieldOverrides';
import { PanelMigrationModel } from '../types/panel';
import { VisualizationSuggestionsBuilder, VisualizationSuggestionScore } from '../types/suggestions';
import { PanelOptionsEditorBuilder } from '../utils/OptionsUIBuilders';
import { PanelPlugin } from './PanelPlugin';
import { getPanelDataSummary } from './suggestions/getPanelDataSummary';
describe('PanelPlugin', () => {
describe('declarative options', () => {
@@ -483,4 +487,107 @@ describe('PanelPlugin', () => {
});
});
});
describe('suggestions', () => {
it('should register a suggestions supplier', () => {
const panel = new PanelPlugin(() => <div>Panel</div>);
panel.meta = panel.meta || {};
panel.meta.id = 'test-panel';
panel.meta.name = 'Test Panel';
panel.setSuggestionsSupplier((ds) => {
if (!ds.hasFieldType(FieldType.number)) {
return;
}
return [
{
name: 'Number Panel',
score: VisualizationSuggestionScore.Good,
},
];
});
const suggestions = panel.getSuggestions(
getPanelDataSummary([createDataFrame({ fields: [{ type: FieldType.number, name: 'Value' }] })])
);
expect(suggestions).toHaveLength(1);
expect(suggestions![0].pluginId).toBe(panel.meta.id);
expect(suggestions![0].name).toBe('Number Panel');
expect(
panel.getSuggestions(
getPanelDataSummary([createDataFrame({ fields: [{ type: FieldType.string, name: 'Value' }] })])
)
).toBeUndefined();
});
it('should not throw for the old syntax, but also should not register suggestions', () => {
jest.spyOn(console, 'warn').mockImplementation();
class DeprecatedSuggestionsSupplier {
getSuggestionsForData(builder: VisualizationSuggestionsBuilder): void {
const appender = builder.getListAppender({
name: 'Deprecated Suggestion',
pluginId: 'deprecated-plugin',
options: {},
});
if (builder.dataSummary.hasNumberField) {
appender.append({});
}
}
}
const panel = new PanelPlugin(() => <div>Panel</div>);
expect(() => {
panel.setSuggestionsSupplier(new DeprecatedSuggestionsSupplier());
}).not.toThrow();
expect(console.warn).toHaveBeenCalled();
expect(
panel.getSuggestions(
getPanelDataSummary([
createDataFrame({
fields: [{ type: FieldType.number, name: 'Value', values: [1, 2, 3, 4, 5] }],
}),
])
)
).toBeUndefined();
});
it('should support the deprecated pattern of getSuggestionsSupplier with builder', () => {
jest.spyOn(console, 'warn').mockImplementation();
const panel = new PanelPlugin(() => <div>Panel</div>).setSuggestionsSupplier((ds) => {
if (!ds.hasFieldType(FieldType.number)) {
return;
}
return [
{
name: 'Number Panel',
score: VisualizationSuggestionScore.Good,
},
];
});
const oldSupplier = panel.getSuggestionsSupplier();
const builder1 = new VisualizationSuggestionsBuilder([
createDataFrame({ fields: [{ type: FieldType.number, name: 'Value' }] }),
]);
oldSupplier.getSuggestionsForData(builder1);
const suggestions1 = builder1.getList();
expect(suggestions1).toHaveLength(1);
expect(suggestions1![0].pluginId).toBe(panel.meta.id);
expect(suggestions1![0].name).toBe('Number Panel');
const builder2 = new VisualizationSuggestionsBuilder([
createDataFrame({ fields: [{ type: FieldType.string, name: 'Value' }] }),
]);
oldSupplier.getSuggestionsForData(builder2);
const suggestions2 = builder2.getList();
expect(suggestions2).toHaveLength(0);
});
});
});

View File

@@ -1,4 +1,4 @@
import { set } from 'lodash';
import { defaultsDeep, set } from 'lodash';
import { ComponentClass, ComponentType } from 'react';
import { FieldConfigOptionsRegistry } from '../field/FieldConfigOptionsRegistry';
@@ -14,11 +14,19 @@ import {
PanelPluginDataSupport,
} from '../types/panel';
import { GrafanaPlugin } from '../types/plugin';
import { VisualizationSuggestionsSupplierFn, VisualizationSuggestionsSupplier } from '../types/suggestions';
import {
getSuggestionHash,
PanelPluginVisualizationSuggestion,
VisualizationSuggestion,
VisualizationSuggestionsSupplierDeprecated,
VisualizationSuggestionsSupplier,
VisualizationSuggestionsBuilder,
} from '../types/suggestions';
import { FieldConfigEditorBuilder, PanelOptionsEditorBuilder } from '../utils/OptionsUIBuilders';
import { deprecationWarning } from '../utils/deprecationWarning';
import { createFieldConfigRegistry } from './registryFactories';
import { PanelDataSummary } from './suggestions/getPanelDataSummary';
/** @beta */
export type StandardOptionConfig = {
@@ -109,7 +117,7 @@ export class PanelPlugin<
};
private optionsSupplier?: PanelOptionsSupplier<TOptions>;
private suggestionsSupplier?: VisualizationSuggestionsSupplier;
private suggestionsSupplier?: VisualizationSuggestionsSupplier<TOptions, TFieldConfigOptions>;
panel: ComponentType<PanelProps<TOptions>> | null;
editor?: ComponentClass<PanelEditorProps<TOptions>>;
@@ -363,56 +371,84 @@ export class PanelPlugin<
}
/**
* @deprecated use VisualizationSuggestionsSupplierFn
* @deprecated use VisualizationSuggestionsSupplier
*/
setSuggestionsSupplier(supplier: VisualizationSuggestionsSupplier): this;
setSuggestionsSupplier(supplier: VisualizationSuggestionsSupplierDeprecated): this;
/**
* @alpha
* sets function that can return visualization examples and suggestions.
*/
setSuggestionsSupplier(supplier: VisualizationSuggestionsSupplierFn<TOptions, TFieldConfigOptions>): this;
setSuggestionsSupplier(supplier: VisualizationSuggestionsSupplier<TOptions, TFieldConfigOptions>): this;
setSuggestionsSupplier(
supplier: VisualizationSuggestionsSupplier | VisualizationSuggestionsSupplierFn<TOptions, TFieldConfigOptions>
supplier:
| VisualizationSuggestionsSupplier<TOptions, TFieldConfigOptions>
| VisualizationSuggestionsSupplierDeprecated
): this {
this.suggestionsSupplier =
typeof supplier === 'function'
? {
getSuggestionsForData: (builder) => {
const appender = builder.getListAppender<TOptions, TFieldConfigOptions>({
pluginId: this.meta.id,
name: this.meta.name,
options: {},
fieldConfig: {
defaults: {},
overrides: [],
},
});
const result = supplier(builder.dataSummary);
if (Array.isArray(result)) {
appender.appendAll(result);
}
},
}
: supplier;
if (typeof supplier !== 'function') {
deprecationWarning(
'PanelPlugin',
'plugin.setSuggestionsSupplier(new Supplier())',
'plugin.setSuggestionsSupplier(dataSummary => [...])'
);
return this;
}
this.suggestionsSupplier = supplier;
return this;
}
/**
* Returns the suggestions supplier
* @alpha
* get suggestions based on the PanelDataSummary
*/
getSuggestionsSupplier(): VisualizationSuggestionsSupplier | undefined {
return this.suggestionsSupplier;
getSuggestions(
panelDataSummary: PanelDataSummary
): Array<PanelPluginVisualizationSuggestion<TOptions, TFieldConfigOptions>> | void {
const withDefaults = (
suggestion: VisualizationSuggestion<TOptions, TFieldConfigOptions>
): Omit<PanelPluginVisualizationSuggestion<TOptions, TFieldConfigOptions>, 'hash'> =>
defaultsDeep(suggestion, {
pluginId: this.meta.id,
name: this.meta.name,
options: {},
fieldConfig: {
defaults: {},
overrides: [],
},
} satisfies Omit<PanelPluginVisualizationSuggestion<TOptions, TFieldConfigOptions>, 'hash'>);
return this.suggestionsSupplier?.(panelDataSummary)?.map(
(s): PanelPluginVisualizationSuggestion<TOptions, TFieldConfigOptions> => {
const suggestionWithDefaults = withDefaults(s);
return Object.assign(suggestionWithDefaults, { hash: getSuggestionHash(suggestionWithDefaults) });
}
);
}
/**
* @alpha
* returns whether the plugin has configured suggestions
* @deprecated use getSuggestions
* we have to keep this method intact to support cloud-onboarding plugin.
*/
hasSuggestions(): boolean {
return this.suggestionsSupplier !== undefined;
getSuggestionsSupplier() {
const withDefaults = (
suggestion: VisualizationSuggestion<TOptions, TFieldConfigOptions>
): Omit<PanelPluginVisualizationSuggestion<TOptions, TFieldConfigOptions>, 'hash'> =>
defaultsDeep(suggestion, {
pluginId: this.meta.id,
name: this.meta.name,
options: {},
fieldConfig: {
defaults: {},
overrides: [],
},
} satisfies Omit<PanelPluginVisualizationSuggestion<TOptions, TFieldConfigOptions>, 'hash'>);
return {
getSuggestionsForData: (builder: VisualizationSuggestionsBuilder) => {
deprecationWarning('PanelPlugin', 'getSuggestionsSupplier()', 'getSuggestions(panelDataSummary)');
this.suggestionsSupplier?.(builder.dataSummary)?.forEach((s) => {
builder.getListAppender(withDefaults(s)).append(s);
});
},
};
}
hasPluginId(pluginId: string) {

View File

@@ -1143,6 +1143,11 @@ export interface FeatureToggles {
*/
newVizSuggestions?: boolean;
/**
* Enable all plugins to supply visualization suggestions (including 3rd party plugins)
* @default false
*/
externalVizSuggestions?: boolean;
/**
* Restrict PanelChrome contents with overflow: hidden;
* @default true
*/

View File

@@ -20,6 +20,8 @@ export type InterpolateFunction = (value: string, scopedVars?: ScopedVars, forma
export interface PanelPluginMeta extends PluginMeta {
/** Indicates that panel does not issue queries */
skipDataQuery?: boolean;
/** Indicates that the panel implements suggestions */
suggestions?: boolean;
/** Indicates that panel should not be available in visualisation picker */
hideFromList?: boolean;
/** Sort order */

View File

@@ -2,11 +2,10 @@ import { defaultsDeep } from 'lodash';
import { DataTransformerConfig } from '@grafana/schema';
import { PanelDataSummary, getPanelDataSummary } from '../panel/suggestions/getPanelDataSummary';
import { getPanelDataSummary, PanelDataSummary } from '../panel/suggestions/getPanelDataSummary';
import { PanelModel } from './dashboard';
import { DataFrame } from './dataFrame';
import { FieldConfigSource } from './fieldOverrides';
import { PanelData } from './panel';
/**
* @internal
@@ -108,35 +107,6 @@ export enum VisualizationSuggestionScore {
OK = 50,
}
/**
* @internal
* TODO this will move into the grafana app code once suppliers are migrated.
*/
export class VisualizationSuggestionsBuilder {
/** Summary stats for current data */
dataSummary: PanelDataSummary;
private list: PanelPluginVisualizationSuggestion[] = [];
constructor(
/** Current data */
public data?: PanelData,
/** Current panel & options */
public panel?: PanelModel
) {
this.dataSummary = getPanelDataSummary(data?.series);
}
getListAppender<TOptions extends unknown, TFieldConfig extends {} = {}>(
defaults: Omit<PanelPluginVisualizationSuggestion<TOptions, TFieldConfig>, 'hash'>
) {
return new VisualizationSuggestionsListAppender<TOptions, TFieldConfig>(this.list, defaults);
}
getList() {
return this.list;
}
}
/**
* @alpha
* TODO: this name is temporary; it will become just "VisualizationSuggestionsSupplier" when the other interface is deleted.
@@ -147,40 +117,48 @@ export class VisualizationSuggestionsBuilder {
* - returns an array of VisualizationSuggestions
* - boolean return equates to "show a single suggestion card for this panel plugin with the default options" (true = show, false or void = hide)
*/
export type VisualizationSuggestionsSupplierFn<TOptions extends unknown, TFieldConfig extends {} = {}> = (
export type VisualizationSuggestionsSupplier<TOptions extends unknown, TFieldConfig extends {} = {}> = (
panelDataSummary: PanelDataSummary
) => Array<VisualizationSuggestion<TOptions, TFieldConfig>> | void;
/**
* @deprecated use VisualizationSuggestionsSupplierFn instead.
* DEPRECATED - the below exports need to remain in the code base to help make the transition for the Polystat plugin, which implements
* suggestions using the old API. These should be removed for Grafana 13.
*/
export type VisualizationSuggestionsSupplier = {
/**
* Adds suitable suggestions for the current data
*/
/**
* @deprecated use VisualizationSuggestionsSupplier
*/
export interface VisualizationSuggestionsSupplierDeprecated {
getSuggestionsForData: (builder: VisualizationSuggestionsBuilder) => void;
};
}
/**
* @internal
* TODO this will move into the grafana app code once suppliers are migrated.
* @deprecated use VisualizationSuggestionsSupplier
*/
export class VisualizationSuggestionsListAppender<TOptions extends unknown, TFieldConfig extends {} = {}> {
constructor(
private list: VisualizationSuggestion[],
private defaults: Partial<PanelPluginVisualizationSuggestion<TOptions, TFieldConfig>> = {}
) {}
export class VisualizationSuggestionsBuilder {
public dataSummary: PanelDataSummary;
public list: PanelPluginVisualizationSuggestion[] = [];
append(suggestion: VisualizationSuggestion<TOptions, TFieldConfig>) {
this.appendAll([suggestion]);
constructor(dataFrames: DataFrame[]) {
this.dataSummary = getPanelDataSummary(dataFrames);
}
appendAll(suggestions: Array<VisualizationSuggestion<TOptions, TFieldConfig>>) {
this.list.push(
...suggestions.map((s): PanelPluginVisualizationSuggestion<TOptions, TFieldConfig> => {
const suggestionWithDefaults = defaultsDeep(s, this.defaults);
return Object.assign(suggestionWithDefaults, { hash: getSuggestionHash(suggestionWithDefaults) });
})
);
getList(): PanelPluginVisualizationSuggestion[] {
return this.list;
}
getListAppender(suggestionDefaults: Omit<PanelPluginVisualizationSuggestion, 'hash'>) {
const withDefaults = (suggestion: VisualizationSuggestion): PanelPluginVisualizationSuggestion => {
const s = defaultsDeep({}, suggestion, suggestionDefaults);
return {
...s,
hash: getSuggestionHash(s),
};
};
return {
append: (suggestion: VisualizationSuggestion) => {
this.list.push(withDefaults(suggestion));
},
};
}
}

View File

@@ -518,6 +518,7 @@ const getStyles = (theme: GrafanaTheme2) => {
return {
container: css({
height: '100%',
position: 'relative',
}),
panel: css({

View File

@@ -37,6 +37,12 @@ export function SidebarResizer() {
return;
}
// mouse is moving with no buttons pressed
if (!e.buttons) {
dragStart.current = null;
return;
}
const diff = e.clientX - dragStart.current;
dragStart.current = e.clientX;

View File

@@ -164,6 +164,7 @@ func (hs *HTTPServer) getFrontendSettings(c *contextmodel.ReqContext) (*dtos.Fro
ModuleHash: hs.pluginAssets.ModuleHash(c.Req.Context(), panel),
BaseURL: panel.BaseURL,
SkipDataQuery: panel.SkipDataQuery,
Suggestions: panel.Suggestions,
HideFromList: panel.HideFromList,
ReleaseState: string(panel.State),
Signature: string(panel.Signature),

View File

@@ -222,6 +222,10 @@ var (
// MStatTotalRepositories is a metric total amount of repositories
MStatTotalRepositories prometheus.Gauge
// MUnifiedStorageMigrationStatus indicates the migration status for unified storage in this instance.
// Possible values: 0 (default/undefined), 1 (migration disabled), 2 (migration would run).
MUnifiedStorageMigrationStatus prometheus.Gauge
)
const (
@@ -691,6 +695,12 @@ func init() {
Help: "total amount of repositories",
Namespace: ExporterName,
})
MUnifiedStorageMigrationStatus = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "unified_storage_migration_status",
Help: "indicates whether this instance would run unified storage migrations (0=undefined, 1=migration disabled, 2=would run)",
Namespace: ExporterName,
})
}
// SetBuildInformation sets the build information for this binary
@@ -829,5 +839,6 @@ func initMetricVars(reg prometheus.Registerer) {
MStatTotalRepositories,
MFolderIDsAPICount,
MFolderIDsServiceCount,
MUnifiedStorageMigrationStatus,
)
}

View File

@@ -319,6 +319,7 @@ type PanelDTO struct {
HideFromList bool `json:"hideFromList"`
Sort int `json:"sort"`
SkipDataQuery bool `json:"skipDataQuery"`
Suggestions bool `json:"suggestions,omitempty"`
ReleaseState string `json:"state"`
BaseURL string `json:"baseUrl"`
Signature string `json:"signature"`

View File

@@ -105,6 +105,7 @@ type JSONData struct {
// Panel settings
SkipDataQuery bool `json:"skipDataQuery"`
Suggestions bool `json:"suggestions,omitempty"`
// App settings
AutoEnabled bool `json:"autoEnabled"`

View File

@@ -156,6 +156,9 @@ func (i *Identity) GetExtra() map[string][]string {
if i.GetOrgRole().IsValid() {
extra["user-instance-role"] = []string{string(i.GetOrgRole())}
}
if i.AccessTokenClaims != nil && i.AccessTokenClaims.Rest.ServiceIdentity != "" {
extra[authn.ServiceIdentityKey] = []string{i.AccessTokenClaims.Rest.ServiceIdentity}
}
return extra
}

View File

@@ -0,0 +1,140 @@
package authn

import (
	"testing"

	authnlib "github.com/grafana/authlib/authn"
	"github.com/grafana/authlib/types"
	"github.com/grafana/grafana/pkg/services/org"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
func TestIdentity_GetExtra(t *testing.T) {
tests := []struct {
name string
identity *Identity
expected map[string][]string
}{
{
name: "returns empty map when no extra fields are set",
identity: &Identity{
ID: "1",
Type: types.TypeUser,
},
expected: map[string][]string{
"user-instance-role": {"None"},
},
},
{
name: "returns id-token when IDToken is set",
identity: &Identity{
ID: "1",
Type: types.TypeUser,
IDToken: "test-id-token",
},
expected: map[string][]string{
"id-token": {"test-id-token"},
"user-instance-role": {"None"},
},
},
{
name: "returns user-instance-role when OrgRole is valid",
identity: &Identity{
ID: "1",
Type: types.TypeUser,
OrgID: 1,
OrgRoles: map[int64]org.RoleType{1: "Admin"},
},
expected: map[string][]string{
"user-instance-role": {"Admin"},
},
},
{
name: "returns service-identity when AccessTokenClaims contains ServiceIdentity",
identity: &Identity{
ID: "1",
Type: types.TypeAccessPolicy,
AccessTokenClaims: &authnlib.Claims[authnlib.AccessTokenClaims]{
Rest: authnlib.AccessTokenClaims{
ServiceIdentity: "secrets-manager",
},
},
},
expected: map[string][]string{
string(authnlib.ServiceIdentityKey): {"secrets-manager"},
"user-instance-role": {"None"},
},
},
{
name: "returns all extra fields when multiple are set",
identity: &Identity{
ID: "1",
Type: types.TypeUser,
OrgID: 1,
IDToken: "test-id-token",
OrgRoles: map[int64]org.RoleType{1: "Editor"},
AccessTokenClaims: &authnlib.Claims[authnlib.AccessTokenClaims]{
Rest: authnlib.AccessTokenClaims{
ServiceIdentity: "custom-service",
},
},
},
expected: map[string][]string{
"id-token": {"test-id-token"},
"user-instance-role": {"Editor"},
string(authnlib.ServiceIdentityKey): {"custom-service"},
},
},
{
name: "does not include service-identity when AccessTokenClaims is nil",
identity: &Identity{
ID: "1",
Type: types.TypeUser,
AccessTokenClaims: nil,
},
expected: map[string][]string{
"user-instance-role": {"None"},
},
},
{
name: "does not include service-identity when ServiceIdentity is empty",
identity: &Identity{
ID: "1",
Type: types.TypeUser,
AccessTokenClaims: &authnlib.Claims[authnlib.AccessTokenClaims]{
Rest: authnlib.AccessTokenClaims{
ServiceIdentity: "",
},
},
},
expected: map[string][]string{
"user-instance-role": {"None"},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
extra := tt.identity.GetExtra()
assert.Equal(t, tt.expected, extra)
})
}
}
func TestIdentity_GetExtra_ServiceIdentityKey(t *testing.T) {
// Test that the ServiceIdentityKey constant matches authlib's constant
identity := &Identity{
ID: "1",
Type: types.TypeAccessPolicy,
AccessTokenClaims: &authnlib.Claims[authnlib.AccessTokenClaims]{
Rest: authnlib.AccessTokenClaims{
ServiceIdentity: "test-service",
},
},
}
extra := identity.GetExtra()
require.Contains(t, extra, string(authnlib.ServiceIdentityKey))
assert.Equal(t, []string{"test-service"}, extra[string(authnlib.ServiceIdentityKey)])
}

View File

@@ -1884,6 +1884,14 @@ var (
Owner: grafanaDatavizSquad,
Expression: "false",
},
{
Name: "externalVizSuggestions",
Description: "Enable all plugins to supply visualization suggestions (including 3rd party plugins)",
Stage: FeatureStageExperimental,
FrontendOnly: true,
Owner: grafanaDatavizSquad,
Expression: "false",
},
{
Name: "preventPanelChromeOverflow",
Description: "Restrict PanelChrome contents with overflow: hidden;",

View File

@@ -256,6 +256,7 @@ cdnPluginsUrls,experimental,@grafana/plugins-platform-backend,false,false,false
pluginInstallAPISync,experimental,@grafana/plugins-platform-backend,false,false,false
newGauge,experimental,@grafana/dataviz-squad,false,false,true
newVizSuggestions,preview,@grafana/dataviz-squad,false,false,true
externalVizSuggestions,experimental,@grafana/dataviz-squad,false,false,true
preventPanelChromeOverflow,preview,@grafana/grafana-frontend-platform,false,false,true
jaegerEnableGrpcEndpoint,experimental,@grafana/oss-big-tent,false,false,false
pluginStoreServiceLoading,experimental,@grafana/plugins-platform-backend,false,false,false

View File

@@ -1383,6 +1383,20 @@
"codeowner": "@grafana/identity-access-team"
}
},
{
"metadata": {
"name": "externalVizSuggestions",
"resourceVersion": "1763498528748",
"creationTimestamp": "2025-11-18T20:42:08Z"
},
"spec": {
"description": "Enable all plugins to supply visualization suggestions (including 3rd party plugins)",
"stage": "experimental",
"codeowner": "@grafana/dataviz-squad",
"frontend": true,
"expression": "false"
}
},
{
"metadata": {
"name": "extraThemes",

View File

@@ -1,261 +0,0 @@
[
"CREATE TABLE `alert` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `version` INT64 NOT NULL, `dashboard_id` INT64 NOT NULL, `panel_id` INT64 NOT NULL, `org_id` INT64 NOT NULL, `name` STRING(255) NOT NULL, `message` STRING(MAX) NOT NULL, `state` STRING(190) NOT NULL, `settings` STRING(MAX), `frequency` INT64 NOT NULL, `handler` INT64 NOT NULL, `severity` STRING(MAX) NOT NULL, `silenced` BOOL NOT NULL, `execution_error` STRING(MAX) NOT NULL, `eval_data` STRING(MAX), `eval_date` TIMESTAMP, `new_state_date` TIMESTAMP NOT NULL, `state_changes` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `for` INT64) PRIMARY KEY (id)",
"CREATE INDEX `IDX_alert_dashboard_id` ON `alert` (dashboard_id)",
"CREATE INDEX `IDX_alert_org_id_id` ON `alert` (org_id, id)",
"CREATE INDEX `IDX_alert_state` ON `alert` (state)",
"CREATE TABLE `alert_configuration` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `alertmanager_configuration` STRING(MAX), `configuration_version` STRING(3) NOT NULL, `created_at` INT64 NOT NULL, `default` BOOL NOT NULL DEFAULT (false), `org_id` INT64 NOT NULL DEFAULT (0), `configuration_hash` STRING(32) NOT NULL DEFAULT ('not-yet-calculated')) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_configuration_org_id` ON `alert_configuration` (org_id)",
"CREATE TABLE `alert_configuration_history` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL DEFAULT (0), `alertmanager_configuration` STRING(MAX) NOT NULL, `configuration_hash` STRING(32) NOT NULL DEFAULT ('not-yet-calculated'), `configuration_version` STRING(3) NOT NULL, `created_at` INT64 NOT NULL, `default` BOOL NOT NULL DEFAULT (false), `last_applied` INT64 NOT NULL DEFAULT (0)) PRIMARY KEY (id)",
"CREATE TABLE `alert_image` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `token` STRING(190) NOT NULL, `path` STRING(190) NOT NULL, `url` STRING(2048) NOT NULL, `created_at` TIMESTAMP NOT NULL, `expires_at` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_image_token` ON `alert_image` (token)",
"CREATE TABLE `alert_instance` (`rule_org_id` INT64 NOT NULL, `rule_uid` STRING(40) NOT NULL, `labels` STRING(MAX) NOT NULL, `labels_hash` STRING(190) NOT NULL, `current_state` STRING(190) NOT NULL, `current_state_since` INT64 NOT NULL, `last_eval_time` INT64 NOT NULL, `current_state_end` INT64 NOT NULL DEFAULT (0), `current_reason` STRING(190), `result_fingerprint` STRING(16), `resolved_at` INT64, `last_sent_at` INT64) PRIMARY KEY (rule_org_id,rule_uid,labels_hash)",
"CREATE INDEX `IDX_alert_instance_rule_org_id_current_state` ON `alert_instance` (rule_org_id, current_state)",
"CREATE INDEX `IDX_alert_instance_rule_org_id_rule_uid_current_state` ON `alert_instance` (rule_org_id, rule_uid, current_state)",
"CREATE TABLE `alert_notification` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `name` STRING(190) NOT NULL, `type` STRING(255) NOT NULL, `settings` STRING(MAX) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `is_default` BOOL NOT NULL DEFAULT (false), `frequency` INT64, `send_reminder` BOOL DEFAULT (false), `disable_resolve_message` BOOL NOT NULL DEFAULT (false), `uid` STRING(40), `secure_settings` STRING(MAX)) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_notification_org_id_uid` ON `alert_notification` (org_id, uid)",
"CREATE TABLE `alert_notification_state` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `alert_id` INT64 NOT NULL, `notifier_id` INT64 NOT NULL, `state` STRING(50) NOT NULL, `version` INT64 NOT NULL, `updated_at` INT64 NOT NULL, `alert_rule_state_updated_version` INT64 NOT NULL) PRIMARY KEY (id)",
"CREATE INDEX `IDX_alert_notification_state_alert_id` ON `alert_notification_state` (alert_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_notification_state_org_id_alert_id_notifier_id` ON `alert_notification_state` (org_id, alert_id, notifier_id)",
"CREATE TABLE `alert_rule` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `title` STRING(190) NOT NULL, `condition` STRING(190) NOT NULL, `data` STRING(MAX), `updated` TIMESTAMP NOT NULL, `interval_seconds` INT64 NOT NULL DEFAULT (60), `version` INT64 NOT NULL DEFAULT (0), `uid` STRING(40) NOT NULL DEFAULT ('0'), `namespace_uid` STRING(40) NOT NULL, `rule_group` STRING(190) NOT NULL, `no_data_state` STRING(15) NOT NULL DEFAULT ('NoData'), `exec_err_state` STRING(15) NOT NULL DEFAULT ('Alerting'), `for` INT64 NOT NULL DEFAULT (0), `annotations` STRING(MAX), `labels` STRING(MAX), `dashboard_uid` STRING(40), `panel_id` INT64, `rule_group_idx` INT64 NOT NULL DEFAULT (1), `is_paused` BOOL NOT NULL DEFAULT (false), `notification_settings` STRING(MAX), `record` STRING(MAX), `metadata` STRING(MAX), `updated_by` STRING(40), `guid` STRING(36) NOT NULL DEFAULT (''), `missing_series_evals_to_resolve` INT64) PRIMARY KEY (id)",
"CREATE INDEX `IDX_alert_rule_org_id_dashboard_uid_panel_id` ON `alert_rule` (org_id, dashboard_uid, panel_id)",
"CREATE INDEX `IDX_alert_rule_org_id_namespace_uid_rule_group` ON `alert_rule` (org_id, namespace_uid, rule_group)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_rule_guid` ON `alert_rule` (guid)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_rule_org_id_namespace_uid_title` ON `alert_rule` (org_id, namespace_uid, title)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_rule_org_id_uid` ON `alert_rule` (org_id, uid)",
"CREATE TABLE `alert_rule_state` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `rule_uid` STRING(40) NOT NULL, `data` BYTES(MAX) NOT NULL, `updated_at` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_rule_state_org_id_rule_uid` ON `alert_rule_state` (org_id, rule_uid)",
"CREATE TABLE `alert_rule_tag` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `alert_id` INT64 NOT NULL, `tag_id` INT64 NOT NULL) PRIMARY KEY (id)",
"CREATE INDEX `IDX_alert_rule_tag_alert_id` ON `alert_rule_tag` (alert_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_rule_tag_alert_id_tag_id` ON `alert_rule_tag` (alert_id, tag_id)",
"CREATE TABLE `alert_rule_version` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `rule_org_id` INT64 NOT NULL, `rule_uid` STRING(40) NOT NULL DEFAULT ('0'), `rule_namespace_uid` STRING(40) NOT NULL, `rule_group` STRING(190) NOT NULL, `parent_version` INT64 NOT NULL, `restored_from` INT64 NOT NULL, `version` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `title` STRING(190) NOT NULL, `condition` STRING(190) NOT NULL, `data` STRING(MAX), `interval_seconds` INT64 NOT NULL, `no_data_state` STRING(15) NOT NULL DEFAULT ('NoData'), `exec_err_state` STRING(15) NOT NULL DEFAULT ('Alerting'), `for` INT64 NOT NULL DEFAULT (0), `annotations` STRING(MAX), `labels` STRING(MAX), `rule_group_idx` INT64 NOT NULL DEFAULT (1), `is_paused` BOOL NOT NULL DEFAULT (false), `notification_settings` STRING(MAX), `record` STRING(MAX), `metadata` STRING(MAX), `created_by` STRING(40), `rule_guid` STRING(36) NOT NULL DEFAULT (''), `missing_series_evals_to_resolve` INT64) PRIMARY KEY (id)",
"CREATE INDEX `IDX_alert_rule_version_rule_org_id_rule_namespace_uid_rule_group` ON `alert_rule_version` (rule_org_id, rule_namespace_uid, rule_group)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_rule_version_rule_guid_version` ON `alert_rule_version` (rule_guid, version)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_alert_rule_version_rule_org_id_rule_uid_rule_guid_version` ON `alert_rule_version` (rule_org_id, rule_uid, rule_guid, version)",
"CREATE TABLE `annotation` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `alert_id` INT64, `user_id` INT64, `dashboard_id` INT64, `panel_id` INT64, `category_id` INT64, `type` STRING(25) NOT NULL, `title` STRING(MAX) NOT NULL, `text` STRING(MAX) NOT NULL, `metric` STRING(255), `prev_state` STRING(40) NOT NULL, `new_state` STRING(40) NOT NULL, `data` STRING(MAX) NOT NULL, `epoch` INT64 NOT NULL, `region_id` INT64 DEFAULT (0), `tags` STRING(4096), `created` INT64 DEFAULT (0), `updated` INT64 DEFAULT (0), `epoch_end` INT64 NOT NULL DEFAULT (0)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_annotation_alert_id` ON `annotation` (alert_id)",
"CREATE INDEX `IDX_annotation_org_id_alert_id` ON `annotation` (org_id, alert_id)",
"CREATE INDEX `IDX_annotation_org_id_created` ON `annotation` (org_id, created)",
"CREATE INDEX `IDX_annotation_org_id_dashboard_id_epoch_end_epoch` ON `annotation` (org_id, dashboard_id, epoch_end, epoch)",
"CREATE INDEX `IDX_annotation_org_id_epoch_end_epoch` ON `annotation` (org_id, epoch_end, epoch)",
"CREATE INDEX `IDX_annotation_org_id_type` ON `annotation` (org_id, type)",
"CREATE INDEX `IDX_annotation_org_id_updated` ON `annotation` (org_id, updated)",
"CREATE TABLE `annotation_tag` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `annotation_id` INT64 NOT NULL, `tag_id` INT64 NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_annotation_tag_annotation_id_tag_id` ON `annotation_tag` (annotation_id, tag_id)",
"CREATE TABLE `anon_device` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `client_ip` STRING(255) NOT NULL, `created_at` TIMESTAMP NOT NULL, `device_id` STRING(127) NOT NULL, `updated_at` TIMESTAMP NOT NULL, `user_agent` STRING(255) NOT NULL) PRIMARY KEY (id)",
"CREATE INDEX `IDX_anon_device_updated_at` ON `anon_device` (updated_at)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_anon_device_device_id` ON `anon_device` (device_id)",
"CREATE TABLE `api_key` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `name` STRING(190) NOT NULL, `key` STRING(190) NOT NULL, `role` STRING(255) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `expires` INT64, `service_account_id` INT64, `last_used_at` TIMESTAMP, `is_revoked` BOOL DEFAULT (false)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_api_key_org_id` ON `api_key` (org_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_api_key_key` ON `api_key` (key)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_api_key_org_id_name` ON `api_key` (org_id, name)",
"CREATE TABLE `autoincrement_sequences` (`name` STRING(128) NOT NULL, `next_value` INT64 NOT NULL) PRIMARY KEY (name)",
"CREATE TABLE `builtin_role` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `role` STRING(190) NOT NULL, `role_id` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `org_id` INT64 NOT NULL DEFAULT (0)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_builtin_role_org_id` ON `builtin_role` (org_id)",
"CREATE INDEX `IDX_builtin_role_role_id` ON `builtin_role` (role_id)",
"CREATE INDEX `IDX_builtin_role_role` ON `builtin_role` (role)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_builtin_role_org_id_role_id_role` ON `builtin_role` (org_id, role_id, role)",
"CREATE TABLE `cache_data` (`cache_key` STRING(168) NOT NULL, `data` BYTES(MAX) NOT NULL, `expires` INT64 NOT NULL, `created_at` INT64 NOT NULL) PRIMARY KEY (cache_key)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_cache_data_cache_key` ON `cache_data` (cache_key)",
"CREATE TABLE `cloud_migration_resource` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `uid` STRING(40) NOT NULL, `resource_type` STRING(40) NOT NULL, `resource_uid` STRING(255), `status` STRING(20) NOT NULL, `error_string` STRING(MAX), `snapshot_uid` STRING(40) NOT NULL, `name` STRING(MAX), `parent_name` STRING(MAX), `error_code` STRING(MAX)) PRIMARY KEY (id)",
"CREATE TABLE `cloud_migration_session` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `uid` STRING(40), `auth_token` STRING(MAX), `slug` STRING(MAX) NOT NULL, `stack_id` INT64 NOT NULL, `region_slug` STRING(MAX) NOT NULL, `cluster_slug` STRING(MAX) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `org_id` INT64 NOT NULL DEFAULT (1)) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_cloud_migration_session_uid` ON `cloud_migration_session` (uid)",
"CREATE TABLE `cloud_migration_snapshot` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `uid` STRING(40), `session_uid` STRING(40), `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `finished` TIMESTAMP, `upload_url` STRING(MAX), `status` STRING(MAX) NOT NULL, `local_directory` STRING(MAX), `gms_snapshot_uid` STRING(MAX), `encryption_key` STRING(MAX), `error_string` STRING(MAX)) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_cloud_migration_snapshot_uid` ON `cloud_migration_snapshot` (uid)",
"CREATE TABLE `correlation` (`uid` STRING(40) NOT NULL, `org_id` INT64 NOT NULL DEFAULT (0), `source_uid` STRING(40) NOT NULL, `target_uid` STRING(40), `label` STRING(MAX) NOT NULL, `description` STRING(MAX) NOT NULL, `config` STRING(MAX), `provisioned` BOOL NOT NULL DEFAULT (false), `type` STRING(40) NOT NULL DEFAULT ('query')) PRIMARY KEY (uid,org_id,source_uid)",
"CREATE INDEX `IDX_correlation_org_id` ON `correlation` (org_id)",
"CREATE INDEX `IDX_correlation_source_uid` ON `correlation` (source_uid)",
"CREATE INDEX `IDX_correlation_uid` ON `correlation` (uid)",
"CREATE TABLE `dashboard` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `version` INT64 NOT NULL, `slug` STRING(189) NOT NULL, `title` STRING(189) NOT NULL, `data` STRING(MAX) NOT NULL, `org_id` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `updated_by` INT64, `created_by` INT64, `gnet_id` INT64, `plugin_id` STRING(189), `folder_id` INT64 NOT NULL DEFAULT (0), `is_folder` BOOL NOT NULL DEFAULT (false), `has_acl` BOOL NOT NULL DEFAULT (false), `uid` STRING(40), `is_public` BOOL NOT NULL DEFAULT (false), `deleted` TIMESTAMP, `api_version` STRING(16), `folder_uid` STRING(40)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_dashboard_deleted` ON `dashboard` (deleted)",
"CREATE INDEX `IDX_dashboard_gnet_id` ON `dashboard` (gnet_id)",
"CREATE INDEX `IDX_dashboard_is_folder` ON `dashboard` (is_folder)",
"CREATE INDEX `IDX_dashboard_org_id_folder_id_title` ON `dashboard` (org_id, folder_id, title)",
"CREATE INDEX `IDX_dashboard_org_id_plugin_id` ON `dashboard` (org_id, plugin_id)",
"CREATE INDEX `IDX_dashboard_org_id` ON `dashboard` (org_id)",
"CREATE INDEX `IDX_dashboard_title` ON `dashboard` (title)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_org_id_uid` ON `dashboard` (org_id, uid)",
"CREATE TABLE `dashboard_acl` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `dashboard_id` INT64 NOT NULL, `user_id` INT64, `team_id` INT64, `permission` INT64 NOT NULL DEFAULT (4), `role` STRING(20), `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE INDEX `IDX_dashboard_acl_dashboard_id` ON `dashboard_acl` (dashboard_id)",
"CREATE INDEX `IDX_dashboard_acl_org_id_role` ON `dashboard_acl` (org_id, role)",
"CREATE INDEX `IDX_dashboard_acl_permission` ON `dashboard_acl` (permission)",
"CREATE INDEX `IDX_dashboard_acl_team_id` ON `dashboard_acl` (team_id)",
"CREATE INDEX `IDX_dashboard_acl_user_id` ON `dashboard_acl` (user_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_acl_dashboard_id_team_id` ON `dashboard_acl` (dashboard_id, team_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_acl_dashboard_id_user_id` ON `dashboard_acl` (dashboard_id, user_id)",
"CREATE TABLE `dashboard_provisioning` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `dashboard_id` INT64, `name` STRING(150) NOT NULL, `external_id` STRING(MAX) NOT NULL, `updated` INT64 NOT NULL DEFAULT (0), `check_sum` STRING(32)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_dashboard_provisioning_dashboard_id_name` ON `dashboard_provisioning` (dashboard_id, name)",
"CREATE INDEX `IDX_dashboard_provisioning_dashboard_id` ON `dashboard_provisioning` (dashboard_id)",
"CREATE TABLE `dashboard_public` (`uid` STRING(40) NOT NULL, `dashboard_uid` STRING(40) NOT NULL, `org_id` INT64 NOT NULL, `time_settings` STRING(MAX), `template_variables` STRING(MAX), `access_token` STRING(32) NOT NULL, `created_by` INT64 NOT NULL, `updated_by` INT64, `created_at` TIMESTAMP NOT NULL, `updated_at` TIMESTAMP, `is_enabled` BOOL NOT NULL DEFAULT (false), `annotations_enabled` BOOL NOT NULL DEFAULT (false), `time_selection_enabled` BOOL NOT NULL DEFAULT (false), `share` STRING(64) NOT NULL DEFAULT ('public')) PRIMARY KEY (uid)",
"CREATE INDEX `IDX_dashboard_public_config_org_id_dashboard_uid` ON `dashboard_public` (org_id, dashboard_uid)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_public_config_access_token` ON `dashboard_public` (access_token)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_public_config_uid` ON `dashboard_public` (uid)",
"CREATE TABLE `dashboard_public_email_share` (`uid` STRING(40) NOT NULL, `public_dashboard_uid` STRING(64) NOT NULL, `recipient` STRING(255) NOT NULL, `type` STRING(64) NOT NULL DEFAULT ('email'), `created_at` TIMESTAMP NOT NULL, `updated_at` TIMESTAMP NOT NULL) PRIMARY KEY (uid)",
"CREATE TABLE `dashboard_public_magic_link` (`uid` STRING(40) NOT NULL, `token_uuid` STRING(64) NOT NULL, `public_dashboard_uid` STRING(64) NOT NULL, `email` STRING(255) NOT NULL, `created_at` TIMESTAMP NOT NULL, `updated_at` TIMESTAMP NOT NULL) PRIMARY KEY (uid)",
"CREATE TABLE `dashboard_public_session` (`uid` STRING(40) NOT NULL, `cookie_uuid` STRING(64) NOT NULL, `public_dashboard_uid` STRING(64) NOT NULL, `email` STRING(255) NOT NULL, `created_at` TIMESTAMP NOT NULL, `updated_at` TIMESTAMP NOT NULL, `last_seen_at` TIMESTAMP) PRIMARY KEY (uid)",
"CREATE TABLE `dashboard_public_usage_by_day` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `public_dashboard_uid` STRING(255) NOT NULL, `day` STRING(40) NOT NULL, `views` INT64 NOT NULL, `queries` INT64 NOT NULL, `errors` INT64 NOT NULL, `load_duration` FLOAT64 NOT NULL, `cached_queries` INT64 NOT NULL DEFAULT (0)) PRIMARY KEY (id)",
"CREATE TABLE `dashboard_snapshot` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `name` STRING(255) NOT NULL, `key` STRING(190) NOT NULL, `delete_key` STRING(190) NOT NULL, `org_id` INT64 NOT NULL, `user_id` INT64 NOT NULL, `external` BOOL NOT NULL, `external_url` STRING(255) NOT NULL, `dashboard` STRING(MAX) NOT NULL, `expires` TIMESTAMP NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `external_delete_url` STRING(255), `dashboard_encrypted` BYTES(MAX)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_dashboard_snapshot_user_id` ON `dashboard_snapshot` (user_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_snapshot_delete_key` ON `dashboard_snapshot` (delete_key)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_snapshot_key` ON `dashboard_snapshot` (key)",
"CREATE TABLE `dashboard_tag` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `dashboard_id` INT64 NOT NULL, `term` STRING(50) NOT NULL, `dashboard_uid` STRING(40), `org_id` INT64 DEFAULT (1)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_dashboard_tag_dashboard_id` ON `dashboard_tag` (dashboard_id)",
"CREATE TABLE `dashboard_usage_by_day` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `dashboard_id` INT64 NOT NULL, `day` STRING(40) NOT NULL, `views` INT64 NOT NULL, `queries` INT64 NOT NULL, `errors` INT64 NOT NULL, `load_duration` FLOAT64 NOT NULL, `cached_queries` INT64 NOT NULL DEFAULT (0), `dashboard_uid` STRING(40), `org_id` INT64 DEFAULT (1)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_dashboard_usage_by_day_dashboard_id` ON `dashboard_usage_by_day` (dashboard_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_usage_by_day_dashboard_id_day` ON `dashboard_usage_by_day` (dashboard_id, day)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_usage_by_day_dashboard_uid_org_id_day` ON `dashboard_usage_by_day` (dashboard_uid, org_id, day)",
"CREATE TABLE `dashboard_usage_sums` (`dashboard_id` INT64 NOT NULL, `updated` TIMESTAMP NOT NULL, `views_last_1_days` INT64 NOT NULL, `views_last_7_days` INT64 NOT NULL, `views_last_30_days` INT64 NOT NULL, `views_total` INT64 NOT NULL, `queries_last_1_days` INT64 NOT NULL, `queries_last_7_days` INT64 NOT NULL, `queries_last_30_days` INT64 NOT NULL, `queries_total` INT64 NOT NULL, `errors_last_1_days` INT64 NOT NULL DEFAULT (0), `errors_last_7_days` INT64 NOT NULL DEFAULT (0), `errors_last_30_days` INT64 NOT NULL DEFAULT (0), `errors_total` INT64 NOT NULL DEFAULT (0), `dashboard_uid` STRING(40), `org_id` INT64 DEFAULT (1)) PRIMARY KEY (dashboard_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_usage_sums_org_id_dashboard_uid` ON `dashboard_usage_sums` (org_id, dashboard_uid)",
"CREATE TABLE `dashboard_version` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `dashboard_id` INT64 NOT NULL, `parent_version` INT64 NOT NULL, `restored_from` INT64 NOT NULL, `version` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `created_by` INT64 NOT NULL, `message` STRING(MAX) NOT NULL, `data` STRING(MAX), `api_version` STRING(16)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_dashboard_version_dashboard_id` ON `dashboard_version` (dashboard_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_dashboard_version_dashboard_id_version` ON `dashboard_version` (dashboard_id, version)",
"CREATE TABLE `data_keys` (`name` STRING(100) NOT NULL, `active` BOOL NOT NULL, `scope` STRING(30) NOT NULL, `provider` STRING(50) NOT NULL, `encrypted_data` BYTES(MAX) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `label` STRING(100)) PRIMARY KEY (name)",
"CREATE TABLE `data_source` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `version` INT64 NOT NULL, `type` STRING(255) NOT NULL, `name` STRING(190) NOT NULL, `access` STRING(255) NOT NULL, `url` STRING(255) NOT NULL, `password` STRING(255), `user` STRING(255), `database` STRING(255), `basic_auth` BOOL NOT NULL, `basic_auth_user` STRING(255), `basic_auth_password` STRING(255), `is_default` BOOL NOT NULL, `json_data` STRING(MAX), `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `with_credentials` BOOL NOT NULL DEFAULT (false), `secure_json_data` STRING(MAX), `read_only` BOOL, `uid` STRING(40) NOT NULL DEFAULT ('0'), `is_prunable` BOOL DEFAULT (false), `api_version` STRING(20)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_data_source_org_id_is_default` ON `data_source` (org_id, is_default)",
"CREATE INDEX `IDX_data_source_org_id` ON `data_source` (org_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_data_source_org_id_name` ON `data_source` (org_id, name)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_data_source_org_id_uid` ON `data_source` (org_id, uid)",
"CREATE TABLE `data_source_acl` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `data_source_id` INT64 NOT NULL, `team_id` INT64 NOT NULL, `user_id` INT64 NOT NULL, `permission` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE INDEX `IDX_data_source_acl_data_source_id` ON `data_source_acl` (data_source_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_data_source_acl_data_source_id_team_id_user_id` ON `data_source_acl` (data_source_id, team_id, user_id)",
"CREATE TABLE `data_source_cache` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `data_source_id` INT64 NOT NULL, `enabled` BOOL NOT NULL, `ttl_ms` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `use_default_ttl` BOOL NOT NULL DEFAULT (true), `data_source_uid` STRING(40) NOT NULL DEFAULT ('0'), `ttl_resources_ms` INT64 NOT NULL DEFAULT (300000)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_data_source_cache_data_source_id` ON `data_source_cache` (data_source_id)",
"CREATE INDEX `IDX_data_source_cache_data_source_uid` ON `data_source_cache` (data_source_uid)",
"CREATE TABLE `data_source_usage_by_day` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `data_source_id` INT64 NOT NULL, `day` STRING(40) NOT NULL, `queries` INT64 NOT NULL, `errors` INT64 NOT NULL, `load_duration_ms` INT64 NOT NULL) PRIMARY KEY (id)",
"CREATE INDEX `IDX_data_source_usage_by_day_data_source_id` ON `data_source_usage_by_day` (data_source_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_data_source_usage_by_day_data_source_id_day` ON `data_source_usage_by_day` (data_source_id, day)",
"CREATE TABLE `entity_event` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `entity_id` STRING(1024) NOT NULL, `event_type` STRING(8) NOT NULL, `created` INT64 NOT NULL) PRIMARY KEY (id)",
"CREATE TABLE `file` (`path` STRING(1024) NOT NULL, `path_hash` STRING(64) NOT NULL, `parent_folder_path_hash` STRING(64) NOT NULL, `contents` BYTES(MAX), `etag` STRING(32) NOT NULL, `cache_control` STRING(128) NOT NULL, `content_disposition` STRING(128) NOT NULL, `updated` TIMESTAMP NOT NULL, `created` TIMESTAMP NOT NULL, `size` INT64 NOT NULL, `mime_type` STRING(255) NOT NULL) PRIMARY KEY (path_hash)",
"CREATE INDEX `IDX_file_parent_folder_path_hash` ON `file` (parent_folder_path_hash)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_file_path_hash` ON `file` (path_hash)",
"CREATE TABLE `file_meta` (`path_hash` STRING(64) NOT NULL, `key` STRING(191) NOT NULL, `value` STRING(1024) NOT NULL) PRIMARY KEY (path_hash,key)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_file_meta_path_hash_key` ON `file_meta` (path_hash, key)",
"CREATE TABLE `folder` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `uid` STRING(40) NOT NULL, `org_id` INT64 NOT NULL, `title` STRING(189) NOT NULL, `description` STRING(255), `parent_uid` STRING(40), `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_folder_org_id_uid` ON `folder` (org_id, uid)",
"CREATE TABLE `kv_store` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `namespace` STRING(190) NOT NULL, `key` STRING(190) NOT NULL, `value` STRING(MAX) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_kv_store_org_id_namespace_key` ON `kv_store` (org_id, namespace, key)",
"CREATE TABLE `library_element` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `folder_id` INT64 NOT NULL, `uid` STRING(40) NOT NULL, `name` STRING(150) NOT NULL, `kind` INT64 NOT NULL, `type` STRING(40) NOT NULL, `description` STRING(2048) NOT NULL, `model` STRING(MAX) NOT NULL, `created` TIMESTAMP NOT NULL, `created_by` INT64 NOT NULL, `updated` TIMESTAMP NOT NULL, `updated_by` INT64 NOT NULL, `version` INT64 NOT NULL, `folder_uid` STRING(40)) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_library_element_org_id_folder_id_name_kind` ON `library_element` (org_id, folder_id, name, kind)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_library_element_org_id_folder_uid_name_kind` ON `library_element` (org_id, folder_uid, name, kind)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_library_element_org_id_uid` ON `library_element` (org_id, uid)",
"CREATE TABLE `library_element_connection` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `element_id` INT64 NOT NULL, `kind` INT64 NOT NULL, `connection_id` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `created_by` INT64 NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_library_element_connection_element_id_kind_connection_id` ON `library_element_connection` (element_id, kind, connection_id)",
"CREATE TABLE `license_token` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `token` STRING(MAX) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE TABLE `login_attempt` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `username` STRING(190) NOT NULL, `ip_address` STRING(30) NOT NULL, `created` INT64 NOT NULL DEFAULT (0)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_login_attempt_username` ON `login_attempt` (username)",
"CREATE TABLE `migration_log` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `migration_id` STRING(255) NOT NULL, `sql` STRING(MAX) NOT NULL, `success` BOOL NOT NULL, `error` STRING(MAX) NOT NULL, `timestamp` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE TABLE `ngalert_configuration` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `alertmanagers` STRING(MAX), `created_at` INT64 NOT NULL, `updated_at` INT64 NOT NULL, `send_alerts_to` INT64 NOT NULL DEFAULT (0)) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_ngalert_configuration_org_id` ON `ngalert_configuration` (org_id)",
"CREATE TABLE `org` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `version` INT64 NOT NULL, `name` STRING(190) NOT NULL, `address1` STRING(255), `address2` STRING(255), `city` STRING(255), `state` STRING(255), `zip_code` STRING(50), `country` STRING(255), `billing_email` STRING(255), `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_org_name` ON `org` (name)",
"CREATE TABLE `org_user` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `user_id` INT64 NOT NULL, `role` STRING(20) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE INDEX `IDX_org_user_org_id` ON `org_user` (org_id)",
"CREATE INDEX `IDX_org_user_user_id` ON `org_user` (user_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_org_user_org_id_user_id` ON `org_user` (org_id, user_id)",
"CREATE TABLE `permission` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `role_id` INT64 NOT NULL, `action` STRING(190) NOT NULL, `scope` STRING(190) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `kind` STRING(40) NOT NULL DEFAULT (''), `attribute` STRING(40) NOT NULL DEFAULT (''), `identifier` STRING(40) NOT NULL DEFAULT ('')) PRIMARY KEY (id)",
"CREATE INDEX `IDX_permission_identifier` ON `permission` (identifier)",
"CREATE INDEX `IDX_permission_role_id` ON `permission` (role_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_permission_action_scope_role_id` ON `permission` (action, scope, role_id)",
"CREATE TABLE `playlist` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `name` STRING(255) NOT NULL, `interval` STRING(255) NOT NULL, `org_id` INT64 NOT NULL, `created_at` INT64 NOT NULL DEFAULT (0), `updated_at` INT64 NOT NULL DEFAULT (0), `uid` STRING(80) NOT NULL DEFAULT ('0')) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_playlist_org_id_uid` ON `playlist` (org_id, uid)",
"CREATE TABLE `playlist_item` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `playlist_id` INT64 NOT NULL, `type` STRING(255) NOT NULL, `value` STRING(MAX) NOT NULL, `title` STRING(MAX) NOT NULL, `order` INT64 NOT NULL) PRIMARY KEY (id)",
"CREATE TABLE `plugin_setting` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL DEFAULT (1), `plugin_id` STRING(190) NOT NULL, `enabled` BOOL NOT NULL, `pinned` BOOL NOT NULL, `json_data` STRING(MAX), `secure_json_data` STRING(MAX), `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `plugin_version` STRING(50)) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_plugin_setting_org_id_plugin_id` ON `plugin_setting` (org_id, plugin_id)",
"CREATE TABLE `preferences` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `user_id` INT64 NOT NULL, `version` INT64 NOT NULL, `home_dashboard_id` INT64 NOT NULL, `timezone` STRING(50) NOT NULL, `theme` STRING(20) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `team_id` INT64, `week_start` STRING(10), `json_data` STRING(MAX)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_preferences_org_id` ON `preferences` (org_id)",
"CREATE INDEX `IDX_preferences_user_id` ON `preferences` (user_id)",
"CREATE TABLE `provenance_type` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `record_key` STRING(190) NOT NULL, `record_type` STRING(190) NOT NULL, `provenance` STRING(190) NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_provenance_type_record_type_record_key_org_id` ON `provenance_type` (record_type, record_key, org_id)",
"CREATE TABLE `query_history` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `uid` STRING(40) NOT NULL, `org_id` INT64 NOT NULL, `datasource_uid` STRING(40) NOT NULL, `created_by` INT64, `created_at` INT64 NOT NULL, `comment` STRING(MAX) NOT NULL, `queries` STRING(MAX) NOT NULL) PRIMARY KEY (id)",
"CREATE INDEX `IDX_query_history_org_id_created_by_datasource_uid` ON `query_history` (org_id, created_by, datasource_uid)",
"CREATE TABLE `query_history_details` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `query_history_item_uid` STRING(40) NOT NULL, `datasource_uid` STRING(40) NOT NULL) PRIMARY KEY (id)",
"CREATE TABLE `query_history_star` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `query_uid` STRING(40) NOT NULL, `user_id` INT64, `org_id` INT64 NOT NULL DEFAULT (1)) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_query_history_star_user_id_query_uid` ON `query_history_star` (user_id, query_uid)",
"CREATE TABLE `quota` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64, `user_id` INT64, `target` STRING(190) NOT NULL, `limit` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_quota_org_id_user_id_target` ON `quota` (org_id, user_id, target)",
"CREATE TABLE `recording_rules` (`id` STRING(128) NOT NULL, `target_ref_id` STRING(128) NOT NULL, `name` STRING(128) NOT NULL, `description` STRING(MAX) NOT NULL, `org_id` INT64 NOT NULL, `interval` INT64 NOT NULL, `range` INT64 NOT NULL, `active` BOOL NOT NULL DEFAULT (false), `count` BOOL NOT NULL DEFAULT (false), `queries` BYTES(MAX) NOT NULL, `created_at` TIMESTAMP NOT NULL, `prom_name` STRING(128)) PRIMARY KEY (id,target_ref_id)",
"CREATE TABLE `remote_write_targets` (`id` STRING(128) NOT NULL, `data_source_uid` STRING(128) NOT NULL, `write_path` STRING(128) NOT NULL, `org_id` INT64 NOT NULL) PRIMARY KEY (id,data_source_uid,write_path)",
"CREATE TABLE `report` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `user_id` INT64 NOT NULL, `org_id` INT64 NOT NULL, `dashboard_id` INT64 NOT NULL, `name` STRING(MAX) NOT NULL, `recipients` STRING(MAX) NOT NULL, `reply_to` STRING(MAX), `message` STRING(MAX), `schedule_frequency` STRING(32) NOT NULL, `schedule_day` STRING(32) NOT NULL, `schedule_hour` INT64 NOT NULL, `schedule_minute` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `schedule_timezone` STRING(50) NOT NULL DEFAULT ('Europe/Stockholm'), `time_from` STRING(255), `time_to` STRING(255), `pdf_landscape` BOOL, `schedule_day_of_month` STRING(32), `pdf_layout` STRING(255), `pdf_orientation` STRING(32), `dashboard_uid` STRING(40), `template_vars` STRING(MAX), `enable_dashboard_url` BOOL, `state` STRING(32), `enable_csv` BOOL, `schedule_start` INT64, `schedule_end` INT64, `schedule_interval_frequency` STRING(32), `schedule_interval_amount` INT64, `schedule_workdays_only` BOOL, `formats` STRING(190) NOT NULL DEFAULT ('[\"pdf\"]'), `scale_factor` INT64 NOT NULL DEFAULT (2), `uid` STRING(40), `pdf_show_template_variables` BOOL NOT NULL DEFAULT (false), `pdf_combine_one_file` BOOL NOT NULL DEFAULT (true), `subject` STRING(MAX)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_report_dashboard_id` ON `report` (dashboard_id)",
"CREATE INDEX `IDX_report_org_id` ON `report` (org_id)",
"CREATE INDEX `IDX_report_user_id` ON `report` (user_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_report_org_id_uid` ON `report` (org_id, uid)",
"CREATE TABLE `report_dashboards` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `report_id` INT64 NOT NULL, `dashboard_uid` STRING(40) NOT NULL DEFAULT (''), `report_variables` STRING(MAX), `time_to` STRING(255), `time_from` STRING(255), `created` TIMESTAMP) PRIMARY KEY (id)",
"CREATE INDEX `IDX_report_dashboards_report_id` ON `report_dashboards` (report_id)",
"CREATE TABLE `report_settings` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `user_id` INT64 NOT NULL, `org_id` INT64 NOT NULL, `branding_report_logo_url` STRING(MAX), `branding_email_logo_url` STRING(MAX), `branding_email_footer_link` STRING(MAX), `branding_email_footer_text` STRING(MAX), `branding_email_footer_mode` STRING(50), `pdf_theme` STRING(40) NOT NULL DEFAULT ('light'), `embedded_image_theme` STRING(40) NOT NULL DEFAULT ('dark')) PRIMARY KEY (id)",
"CREATE TABLE `role` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `name` STRING(190) NOT NULL, `description` STRING(MAX), `version` INT64 NOT NULL, `org_id` INT64 NOT NULL, `uid` STRING(40) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `display_name` STRING(190), `group_name` STRING(190), `hidden` BOOL NOT NULL DEFAULT (false)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_role_org_id` ON `role` (org_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_role_org_id_name` ON `role` (org_id, name)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_role_uid` ON `role` (uid)",
"CREATE TABLE `secrets` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `namespace` STRING(255) NOT NULL, `type` STRING(255) NOT NULL, `value` STRING(MAX), `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE TABLE `seed_assignment` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `builtin_role` STRING(190) NOT NULL, `role_name` STRING(190), `action` STRING(190), `scope` STRING(190), `origin` STRING(190)) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_seed_assignment_builtin_role_action_scope` ON `seed_assignment` (builtin_role, action, scope)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_seed_assignment_builtin_role_role_name` ON `seed_assignment` (builtin_role, role_name)",
"CREATE TABLE `server_lock` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `operation_uid` STRING(100) NOT NULL, `version` INT64 NOT NULL, `last_execution` INT64 NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_server_lock_operation_uid` ON `server_lock` (operation_uid)",
"CREATE TABLE `session` (`key` STRING(16) NOT NULL, `data` BYTES(MAX) NOT NULL, `expiry` INT64 NOT NULL) PRIMARY KEY (key)",
"CREATE TABLE `setting` (`section` STRING(100) NOT NULL, `key` STRING(100) NOT NULL, `value` STRING(MAX) NOT NULL, `encrypted_value` STRING(MAX)) PRIMARY KEY (section,key)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_setting_section_key` ON `setting` (section, key)",
"CREATE TABLE `short_url` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `uid` STRING(40) NOT NULL, `path` STRING(MAX) NOT NULL, `created_by` INT64, `created_at` INT64 NOT NULL, `last_seen_at` INT64) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_short_url_org_id_uid` ON `short_url` (org_id, uid)",
"CREATE TABLE `signing_key` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `key_id` STRING(255) NOT NULL, `private_key` STRING(MAX) NOT NULL, `added_at` TIMESTAMP NOT NULL, `expires_at` TIMESTAMP, `alg` STRING(255) NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_signing_key_key_id` ON `signing_key` (key_id)",
"CREATE TABLE `sso_setting` (`id` STRING(40) NOT NULL, `provider` STRING(255) NOT NULL, `settings` STRING(MAX) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `is_deleted` BOOL NOT NULL DEFAULT (false)) PRIMARY KEY (id)",
"CREATE TABLE `star` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `user_id` INT64 NOT NULL, `dashboard_id` INT64 NOT NULL, `dashboard_uid` STRING(40), `org_id` INT64 DEFAULT (1), `updated` TIMESTAMP) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_star_user_id_dashboard_id` ON `star` (user_id, dashboard_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_star_user_id_dashboard_uid_org_id` ON `star` (user_id, dashboard_uid, org_id)",
"CREATE TABLE `tag` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `key` STRING(100) NOT NULL, `value` STRING(100) NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_tag_key_value` ON `tag` (key, value)",
"CREATE TABLE `team` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `name` STRING(190) NOT NULL, `org_id` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `uid` STRING(40), `email` STRING(190)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_team_org_id` ON `team` (org_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_team_org_id_name` ON `team` (org_id, name)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_team_org_id_uid` ON `team` (org_id, uid)",
"CREATE TABLE `team_group` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `team_id` INT64 NOT NULL, `group_id` STRING(190) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE INDEX `IDX_team_group_group_id` ON `team_group` (group_id)",
"CREATE INDEX `IDX_team_group_org_id` ON `team_group` (org_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_team_group_org_id_team_id_group_id` ON `team_group` (org_id, team_id, group_id)",
"CREATE TABLE `team_member` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `team_id` INT64 NOT NULL, `user_id` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `external` BOOL, `permission` INT64) PRIMARY KEY (id)",
"CREATE INDEX `IDX_team_member_org_id` ON `team_member` (org_id)",
"CREATE INDEX `IDX_team_member_team_id` ON `team_member` (team_id)",
"CREATE INDEX `IDX_team_member_user_id_org_id` ON `team_member` (user_id, org_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_team_member_org_id_team_id_user_id` ON `team_member` (org_id, team_id, user_id)",
"CREATE TABLE `team_role` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `team_id` INT64 NOT NULL, `role_id` INT64 NOT NULL, `created` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE INDEX `IDX_team_role_org_id` ON `team_role` (org_id)",
"CREATE INDEX `IDX_team_role_team_id` ON `team_role` (team_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_team_role_org_id_team_id_role_id` ON `team_role` (org_id, team_id, role_id)",
"CREATE TABLE `temp_user` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `version` INT64 NOT NULL, `email` STRING(190) NOT NULL, `name` STRING(255), `role` STRING(20), `code` STRING(190) NOT NULL, `status` STRING(20) NOT NULL, `invited_by_user_id` INT64, `email_sent` BOOL NOT NULL, `email_sent_on` TIMESTAMP, `remote_addr` STRING(255), `created` INT64 NOT NULL DEFAULT (0), `updated` INT64 NOT NULL DEFAULT (0)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_temp_user_code` ON `temp_user` (code)",
"CREATE INDEX `IDX_temp_user_email` ON `temp_user` (email)",
"CREATE INDEX `IDX_temp_user_org_id` ON `temp_user` (org_id)",
"CREATE INDEX `IDX_temp_user_status` ON `temp_user` (status)",
"CREATE TABLE `test_data` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `metric1` STRING(20), `metric2` STRING(150), `value_big_int` INT64, `value_double` FLOAT64, `value_float` FLOAT64, `value_int` INT64, `time_epoch` INT64 NOT NULL, `time_date_time` TIMESTAMP NOT NULL, `time_time_stamp` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE TABLE `user` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `version` INT64 NOT NULL, `login` STRING(190) NOT NULL, `email` STRING(190) NOT NULL, `name` STRING(255), `password` STRING(255), `salt` STRING(50), `rands` STRING(50), `company` STRING(255), `org_id` INT64 NOT NULL, `is_admin` BOOL NOT NULL, `email_verified` BOOL, `theme` STRING(255), `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL, `help_flags1` INT64 NOT NULL DEFAULT (0), `last_seen_at` TIMESTAMP, `is_disabled` BOOL NOT NULL DEFAULT (false), `is_service_account` BOOL DEFAULT (false), `uid` STRING(40), `is_provisioned` BOOL NOT NULL DEFAULT (false)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_user_login_email` ON `user` (login, email)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_user_email` ON `user` (email)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_user_login` ON `user` (login)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_user_uid` ON `user` (uid)",
"CREATE TABLE `user_auth` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `user_id` INT64 NOT NULL, `auth_module` STRING(190) NOT NULL, `auth_id` STRING(190), `created` TIMESTAMP NOT NULL, `o_auth_access_token` STRING(MAX), `o_auth_refresh_token` STRING(MAX), `o_auth_token_type` STRING(MAX), `o_auth_expiry` TIMESTAMP, `o_auth_id_token` STRING(MAX)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_user_auth_auth_module_auth_id` ON `user_auth` (auth_module, auth_id)",
"CREATE INDEX `IDX_user_auth_user_id` ON `user_auth` (user_id)",
"CREATE TABLE `user_auth_token` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `user_id` INT64 NOT NULL, `auth_token` STRING(100) NOT NULL, `prev_auth_token` STRING(100) NOT NULL, `user_agent` STRING(255) NOT NULL, `client_ip` STRING(255) NOT NULL, `auth_token_seen` BOOL NOT NULL, `seen_at` INT64, `rotated_at` INT64 NOT NULL, `created_at` INT64 NOT NULL, `updated_at` INT64 NOT NULL, `revoked_at` INT64, `external_session_id` INT64) PRIMARY KEY (id)",
"CREATE INDEX `IDX_user_auth_token_revoked_at` ON `user_auth_token` (revoked_at)",
"CREATE INDEX `IDX_user_auth_token_user_id` ON `user_auth_token` (user_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_user_auth_token_auth_token` ON `user_auth_token` (auth_token)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_user_auth_token_prev_auth_token` ON `user_auth_token` (prev_auth_token)",
"CREATE TABLE `user_dashboard_views` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `user_id` INT64 NOT NULL, `dashboard_id` INT64 NOT NULL, `viewed` TIMESTAMP NOT NULL, `org_id` INT64, `dashboard_uid` STRING(40)) PRIMARY KEY (id)",
"CREATE INDEX `IDX_user_dashboard_views_dashboard_id` ON `user_dashboard_views` (dashboard_id)",
"CREATE INDEX `IDX_user_dashboard_views_org_id_dashboard_uid` ON `user_dashboard_views` (org_id, dashboard_uid)",
"CREATE INDEX `IDX_user_dashboard_views_user_id` ON `user_dashboard_views` (user_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_user_dashboard_views_user_id_dashboard_id` ON `user_dashboard_views` (user_id, dashboard_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_user_dashboard_views_user_id_org_id_dashboard_uid` ON `user_dashboard_views` (user_id, org_id, dashboard_uid)",
"CREATE TABLE `user_external_session` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `user_auth_id` INT64 NOT NULL, `user_id` INT64 NOT NULL, `auth_module` STRING(190) NOT NULL, `access_token` STRING(MAX), `id_token` STRING(MAX), `refresh_token` STRING(MAX), `session_id` STRING(1024), `session_id_hash` STRING(44), `name_id` STRING(1024), `name_id_hash` STRING(44), `expires_at` TIMESTAMP, `created_at` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE TABLE `user_role` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `org_id` INT64 NOT NULL, `user_id` INT64 NOT NULL, `role_id` INT64 NOT NULL, `created` TIMESTAMP NOT NULL, `group_mapping_uid` STRING(40) DEFAULT ('')) PRIMARY KEY (id)",
"CREATE INDEX `IDX_user_role_org_id` ON `user_role` (org_id)",
"CREATE INDEX `IDX_user_role_user_id` ON `user_role` (user_id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_user_role_org_id_user_id_role_id_group_mapping_uid` ON `user_role` (org_id, user_id, role_id, group_mapping_uid)",
"CREATE TABLE `user_stats` (`id` INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE), `user_id` INT64 NOT NULL, `billing_role` STRING(40) NOT NULL, `created` TIMESTAMP NOT NULL, `updated` TIMESTAMP NOT NULL) PRIMARY KEY (id)",
"CREATE UNIQUE NULL_FILTERED INDEX `UQE_user_stats_user_id` ON `user_stats` (user_id)",
"CREATE TABLE resource ( namespace STRING(63), resource_group STRING(190), resource STRING(190), name STRING(253), folder STRING(253), value BYTES(MAX), resource_version TIMESTAMP NOT NULL OPTIONS ( allow_commit_timestamp = true ), previous_resource_version TIMESTAMP, ) PRIMARY KEY (namespace, resource_group, resource, name)",
"CREATE TABLE resource_history ( namespace STRING(63), resource_group STRING(190), resource STRING(190), name STRING(253), folder STRING(253), value BYTES(MAX), resource_version TIMESTAMP NOT NULL OPTIONS ( allow_commit_timestamp = true ), previous_resource_version TIMESTAMP, action INT64, ) PRIMARY KEY (namespace, resource_group, resource, name, resource_version DESC)",
"CREATE TABLE resource_blob ( uid STRING(36) NOT NULL, resource_key STRING(MAX) NOT NULL, content_type STRING(100), value BYTES(MAX), ) PRIMARY KEY (uid)",
"CREATE CHANGE STREAM resource_stream FOR resource"
]

View File

@@ -1,761 +0,0 @@
[
"create migration_log table",
"create user table",
"add unique index user.login",
"add unique index user.email",
"drop index UQE_user_login - v1",
"drop index UQE_user_email - v1",
"Rename table user to user_v1 - v1",
"create user table v2",
"create index UQE_user_login - v2",
"create index UQE_user_email - v2",
"copy data_source v1 to v2",
"Drop old table user_v1",
"Add column help_flags1 to user table",
"Update user table charset",
"Add last_seen_at column to user",
"Add missing user data",
"Add is_disabled column to user",
"Add index user.login/user.email",
"Add is_service_account column to user",
"Update is_service_account column to nullable",
"Add uid column to user",
"Update uid column values for users",
"Add unique index user_uid",
"Add is_provisioned column to user",
"update login field with orgid to allow for multiple service accounts with same name across orgs",
"update service accounts login field orgid to appear only once",
"update login and email fields to lowercase",
"update login and email fields to lowercase2",
"create temp user table v1-7",
"create index IDX_temp_user_email - v1-7",
"create index IDX_temp_user_org_id - v1-7",
"create index IDX_temp_user_code - v1-7",
"create index IDX_temp_user_status - v1-7",
"Update temp_user table charset",
"drop index IDX_temp_user_email - v1",
"drop index IDX_temp_user_org_id - v1",
"drop index IDX_temp_user_code - v1",
"drop index IDX_temp_user_status - v1",
"Rename table temp_user to temp_user_tmp_qwerty - v1",
"create temp_user v2",
"create index IDX_temp_user_email - v2",
"create index IDX_temp_user_org_id - v2",
"create index IDX_temp_user_code - v2",
"create index IDX_temp_user_status - v2",
"copy temp_user v1 to v2",
"drop temp_user_tmp_qwerty",
"Set created for temp users that will otherwise prematurely expire",
"create star table",
"add unique index star.user_id_dashboard_id",
"Add column dashboard_uid in star",
"Add column org_id in star",
"Add column updated in star",
"add index in star table on dashboard_uid, org_id and user_id columns",
"create org table v1",
"create index UQE_org_name - v1",
"create org_user table v1",
"create index IDX_org_user_org_id - v1",
"create index UQE_org_user_org_id_user_id - v1",
"create index IDX_org_user_user_id - v1",
"Update org table charset",
"Update org_user table charset",
"Migrate all Read Only Viewers to Viewers",
"create dashboard table",
"add index dashboard.account_id",
"add unique index dashboard_account_id_slug",
"create dashboard_tag table",
"add unique index dashboard_tag.dasboard_id_term",
"drop index UQE_dashboard_tag_dashboard_id_term - v1",
"Rename table dashboard to dashboard_v1 - v1",
"create dashboard v2",
"create index IDX_dashboard_org_id - v2",
"create index UQE_dashboard_org_id_slug - v2",
"copy dashboard v1 to v2",
"drop table dashboard_v1",
"alter dashboard.data to mediumtext v1",
"Add column updated_by in dashboard - v2",
"Add column created_by in dashboard - v2",
"Add column gnetId in dashboard",
"Add index for gnetId in dashboard",
"Add column plugin_id in dashboard",
"Add index for plugin_id in dashboard",
"Add index for dashboard_id in dashboard_tag",
"Update dashboard table charset",
"Update dashboard_tag table charset",
"Add column folder_id in dashboard",
"Add column isFolder in dashboard",
"Add column has_acl in dashboard",
"Add column uid in dashboard",
"Update uid column values in dashboard",
"Add unique index dashboard_org_id_uid",
"Remove unique index org_id_slug",
"Update dashboard title length",
"Add unique index for dashboard_org_id_title_folder_id",
"create dashboard_provisioning",
"Rename table dashboard_provisioning to dashboard_provisioning_tmp_qwerty - v1",
"create dashboard_provisioning v2",
"create index IDX_dashboard_provisioning_dashboard_id - v2",
"create index IDX_dashboard_provisioning_dashboard_id_name - v2",
"copy dashboard_provisioning v1 to v2",
"drop dashboard_provisioning_tmp_qwerty",
"Add check_sum column",
"Add index for dashboard_title",
"delete tags for deleted dashboards",
"delete stars for deleted dashboards",
"Add index for dashboard_is_folder",
"Add isPublic for dashboard",
"Add deleted for dashboard",
"Add index for deleted",
"Add column dashboard_uid in dashboard_tag",
"Add column org_id in dashboard_tag",
"Add missing dashboard_uid and org_id to dashboard_tag",
"Add apiVersion for dashboard",
"Add missing dashboard_uid and org_id to star",
"create data_source table",
"add index data_source.account_id",
"add unique index data_source.account_id_name",
"drop index IDX_data_source_account_id - v1",
"drop index UQE_data_source_account_id_name - v1",
"Rename table data_source to data_source_v1 - v1",
"create data_source table v2",
"create index IDX_data_source_org_id - v2",
"create index UQE_data_source_org_id_name - v2",
"Drop old table data_source_v1 #2",
"Add column with_credentials",
"Add secure json data column",
"Update data_source table charset",
"Update initial version to 1",
"Add read_only data column",
"Migrate logging ds to loki ds",
"Update json_data with nulls",
"Add uid column",
"Update uid value",
"Add unique index datasource_org_id_uid",
"add unique index datasource_org_id_is_default",
"Add is_prunable column",
"Add api_version column",
"create api_key table",
"add index api_key.account_id",
"add index api_key.key",
"add index api_key.account_id_name",
"drop index IDX_api_key_account_id - v1",
"drop index UQE_api_key_key - v1",
"drop index UQE_api_key_account_id_name - v1",
"Rename table api_key to api_key_v1 - v1",
"create api_key table v2",
"create index IDX_api_key_org_id - v2",
"create index UQE_api_key_key - v2",
"create index UQE_api_key_org_id_name - v2",
"copy api_key v1 to v2",
"Drop old table api_key_v1",
"Update api_key table charset",
"Add expires to api_key table",
"Add service account foreign key",
"set service account foreign key to nil if 0",
"Add last_used_at to api_key table",
"Add is_revoked column to api_key table",
"create dashboard_snapshot table v4",
"drop table dashboard_snapshot_v4 #1",
"create dashboard_snapshot table v5 #2",
"create index UQE_dashboard_snapshot_key - v5",
"create index UQE_dashboard_snapshot_delete_key - v5",
"create index IDX_dashboard_snapshot_user_id - v5",
"alter dashboard_snapshot to mediumtext v2",
"Update dashboard_snapshot table charset",
"Add column external_delete_url to dashboard_snapshots table",
"Add encrypted dashboard json column",
"Change dashboard_encrypted column to MEDIUMBLOB",
"create quota table v1",
"create index UQE_quota_org_id_user_id_target - v1",
"Update quota table charset",
"create plugin_setting table",
"create index UQE_plugin_setting_org_id_plugin_id - v1",
"Add column plugin_version to plugin_settings",
"Update plugin_setting table charset",
"update NULL org_id to 1",
"make org_id NOT NULL and DEFAULT VALUE 1",
"create session table",
"Drop old table playlist table",
"Drop old table playlist_item table",
"create playlist table v2",
"create playlist item table v2",
"Update playlist table charset",
"Update playlist_item table charset",
"Add playlist column created_at",
"Add playlist column updated_at",
"drop preferences table v2",
"drop preferences table v3",
"create preferences table v3",
"Update preferences table charset",
"Add column team_id in preferences",
"Update team_id column values in preferences",
"Add column week_start in preferences",
"Add column preferences.json_data",
"alter preferences.json_data to mediumtext v1",
"Add preferences index org_id",
"Add preferences index user_id",
"create alert table v1",
"add index alert org_id \u0026 id ",
"add index alert state",
"add index alert dashboard_id",
"Create alert_rule_tag table v1",
"Add unique index alert_rule_tag.alert_id_tag_id",
"drop index UQE_alert_rule_tag_alert_id_tag_id - v1",
"Rename table alert_rule_tag to alert_rule_tag_v1 - v1",
"Create alert_rule_tag table v2",
"create index UQE_alert_rule_tag_alert_id_tag_id - Add unique index alert_rule_tag.alert_id_tag_id V2",
"copy alert_rule_tag v1 to v2",
"drop table alert_rule_tag_v1",
"create alert_notification table v1",
"Add column is_default",
"Add column frequency",
"Add column send_reminder",
"Add column disable_resolve_message",
"add index alert_notification org_id \u0026 name",
"Update alert table charset",
"Update alert_notification table charset",
"create notification_journal table v1",
"add index notification_journal org_id \u0026 alert_id \u0026 notifier_id",
"drop alert_notification_journal",
"create alert_notification_state table v1",
"add index alert_notification_state org_id \u0026 alert_id \u0026 notifier_id",
"Add for to alert table",
"Add column uid in alert_notification",
"Update uid column values in alert_notification",
"Add unique index alert_notification_org_id_uid",
"Remove unique index org_id_name",
"Add column secure_settings in alert_notification",
"alter alert.settings to mediumtext",
"Add non-unique index alert_notification_state_alert_id",
"Add non-unique index alert_rule_tag_alert_id",
"Drop old annotation table v4",
"create annotation table v5",
"add index annotation 0 v3",
"add index annotation 1 v3",
"add index annotation 2 v3",
"add index annotation 3 v3",
"add index annotation 4 v3",
"Update annotation table charset",
"Add column region_id to annotation table",
"Drop category_id index",
"Add column tags to annotation table",
"Create annotation_tag table v2",
"Add unique index annotation_tag.annotation_id_tag_id",
"drop index UQE_annotation_tag_annotation_id_tag_id - v2",
"Rename table annotation_tag to annotation_tag_v2 - v2",
"Create annotation_tag table v3",
"create index UQE_annotation_tag_annotation_id_tag_id - Add unique index annotation_tag.annotation_id_tag_id V3",
"copy annotation_tag v2 to v3",
"drop table annotation_tag_v2",
"Update alert annotations and set TEXT to empty",
"Add created time to annotation table",
"Add updated time to annotation table",
"Add index for created in annotation table",
"Add index for updated in annotation table",
"Convert existing annotations from seconds to milliseconds",
"Add epoch_end column",
"Add index for epoch_end",
"Make epoch_end the same as epoch",
"Move region to single row",
"Remove index org_id_epoch from annotation table",
"Remove index org_id_dashboard_id_panel_id_epoch from annotation table",
"Add index for org_id_dashboard_id_epoch_end_epoch on annotation table",
"Add index for org_id_epoch_end_epoch on annotation table",
"Remove index org_id_epoch_epoch_end from annotation table",
"Add index for alert_id on annotation table",
"Increase tags column to length 4096",
"Increase prev_state column to length 40 not null",
"Increase new_state column to length 40 not null",
"create test_data table",
"create dashboard_version table v1",
"add index dashboard_version.dashboard_id",
"add unique index dashboard_version.dashboard_id and dashboard_version.version",
"Set dashboard version to 1 where 0",
"save existing dashboard data in dashboard_version table v1",
"alter dashboard_version.data to mediumtext v1",
"Add apiVersion for dashboard_version",
"create team table",
"add index team.org_id",
"add unique index team_org_id_name",
"Add column uid in team",
"Update uid column values in team",
"Add unique index team_org_id_uid",
"create team member table",
"add index team_member.org_id",
"add unique index team_member_org_id_team_id_user_id",
"add index team_member.team_id",
"Add column email to team table",
"Add column external to team_member table",
"Add column permission to team_member table",
"add unique index team_member_user_id_org_id",
"create dashboard acl table",
"add index dashboard_acl_dashboard_id",
"add unique index dashboard_acl_dashboard_id_user_id",
"add unique index dashboard_acl_dashboard_id_team_id",
"add index dashboard_acl_user_id",
"add index dashboard_acl_team_id",
"add index dashboard_acl_org_id_role",
"add index dashboard_permission",
"save default acl rules in dashboard_acl table",
"delete acl rules for deleted dashboards and folders",
"create tag table",
"add index tag.key_value",
"create login attempt table",
"add index login_attempt.username",
"drop index IDX_login_attempt_username - v1",
"Rename table login_attempt to login_attempt_tmp_qwerty - v1",
"create login_attempt v2",
"create index IDX_login_attempt_username - v2",
"copy login_attempt v1 to v2",
"drop login_attempt_tmp_qwerty",
"create user auth table",
"create index IDX_user_auth_auth_module_auth_id - v1",
"alter user_auth.auth_id to length 190",
"Add OAuth access token to user_auth",
"Add OAuth refresh token to user_auth",
"Add OAuth token type to user_auth",
"Add OAuth expiry to user_auth",
"Add index to user_id column in user_auth",
"Add OAuth ID token to user_auth",
"create server_lock table",
"add index server_lock.operation_uid",
"create user auth token table",
"add unique index user_auth_token.auth_token",
"add unique index user_auth_token.prev_auth_token",
"add index user_auth_token.user_id",
"Add revoked_at to the user auth token",
"add index user_auth_token.revoked_at",
"add external_session_id to user_auth_token",
"create cache_data table",
"add unique index cache_data.cache_key",
"create short_url table v1",
"add index short_url.org_id-uid",
"alter table short_url alter column created_by type to bigint",
"delete alert_definition table",
"recreate alert_definition table",
"add index in alert_definition on org_id and title columns",
"add index in alert_definition on org_id and uid columns",
"alter alert_definition table data column to mediumtext in mysql",
"drop index in alert_definition on org_id and title columns",
"drop index in alert_definition on org_id and uid columns",
"add unique index in alert_definition on org_id and title columns",
"add unique index in alert_definition on org_id and uid columns",
"Add column paused in alert_definition",
"drop alert_definition table",
"delete alert_definition_version table",
"recreate alert_definition_version table",
"add index in alert_definition_version table on alert_definition_id and version columns",
"add index in alert_definition_version table on alert_definition_uid and version columns",
"alter alert_definition_version table data column to mediumtext in mysql",
"drop alert_definition_version table",
"create alert_instance table",
"add index in alert_instance table on def_org_id, def_uid and current_state columns",
"add index in alert_instance table on def_org_id, current_state columns",
"add column current_state_end to alert_instance",
"remove index def_org_id, def_uid, current_state on alert_instance",
"remove index def_org_id, current_state on alert_instance",
"rename def_org_id to rule_org_id in alert_instance",
"rename def_uid to rule_uid in alert_instance",
"add index rule_org_id, rule_uid, current_state on alert_instance",
"add index rule_org_id, current_state on alert_instance",
"add current_reason column related to current_state",
"add result_fingerprint column to alert_instance",
"create alert_rule table",
"add index in alert_rule on org_id and title columns",
"add index in alert_rule on org_id and uid columns",
"add index in alert_rule on org_id, namespace_uid, group_uid columns",
"alter alert_rule table data column to mediumtext in mysql",
"add column for to alert_rule",
"add column annotations to alert_rule",
"add column labels to alert_rule",
"remove unique index from alert_rule on org_id, title columns",
"add index in alert_rule on org_id, namespase_uid and title columns",
"add dashboard_uid column to alert_rule",
"add panel_id column to alert_rule",
"add index in alert_rule on org_id, dashboard_uid and panel_id columns",
"add rule_group_idx column to alert_rule",
"add is_paused column to alert_rule table",
"fix is_paused column for alert_rule table",
"create alert_rule_version table",
"add index in alert_rule_version table on rule_org_id, rule_uid and version columns",
"add index in alert_rule_version table on rule_org_id, rule_namespace_uid and rule_group columns",
"alter alert_rule_version table data column to mediumtext in mysql",
"add column for to alert_rule_version",
"add column annotations to alert_rule_version",
"add column labels to alert_rule_version",
"add rule_group_idx column to alert_rule_version",
"add is_paused column to alert_rule_versions table",
"fix is_paused column for alert_rule_version table",
"create_alert_configuration_table",
"Add column default in alert_configuration",
"alert alert_configuration alertmanager_configuration column from TEXT to MEDIUMTEXT if mysql",
"add column org_id in alert_configuration",
"add index in alert_configuration table on org_id column",
"add configuration_hash column to alert_configuration",
"create_ngalert_configuration_table",
"add index in ngalert_configuration on org_id column",
"add column send_alerts_to in ngalert_configuration",
"create provenance_type table",
"add index to uniquify (record_key, record_type, org_id) columns",
"create alert_image table",
"add unique index on token to alert_image table",
"support longer URLs in alert_image table",
"create_alert_configuration_history_table",
"drop non-unique orgID index on alert_configuration",
"drop unique orgID index on alert_configuration if exists",
"extract alertmanager configuration history to separate table",
"add unique index on orgID to alert_configuration",
"add last_applied column to alert_configuration_history",
"create library_element table v1",
"add index library_element org_id-folder_id-name-kind",
"create library_element_connection table v1",
"add index library_element_connection element_id-kind-connection_id",
"add unique index library_element org_id_uid",
"increase max description length to 2048",
"alter library_element model to mediumtext",
"add library_element folder uid",
"populate library_element folder_uid",
"add index library_element org_id-folder_uid-name-kind",
"clone move dashboard alerts to unified alerting",
"create data_keys table",
"create secrets table",
"rename data_keys name column to id",
"add name column into data_keys",
"copy data_keys id column values into name",
"rename data_keys name column to label",
"rename data_keys id column back to name",
"create kv_store table v1",
"add index kv_store.org_id-namespace-key",
"update dashboard_uid and panel_id from existing annotations",
"create permission table",
"add unique index permission.role_id",
"add unique index role_id_action_scope",
"create role table",
"add column display_name",
"add column group_name",
"add index role.org_id",
"add unique index role_org_id_name",
"add index role_org_id_uid",
"create team role table",
"add index team_role.org_id",
"add unique index team_role_org_id_team_id_role_id",
"add index team_role.team_id",
"create user role table",
"add index user_role.org_id",
"add unique index user_role_org_id_user_id_role_id",
"add index user_role.user_id",
"create builtin role table",
"add index builtin_role.role_id",
"add index builtin_role.name",
"Add column org_id to builtin_role table",
"add index builtin_role.org_id",
"add unique index builtin_role_org_id_role_id_role",
"Remove unique index role_org_id_uid",
"add unique index role.uid",
"create seed assignment table",
"add unique index builtin_role_role_name",
"add column hidden to role table",
"permission kind migration",
"permission attribute migration",
"permission identifier migration",
"add permission identifier index",
"add permission action scope role_id index",
"remove permission role_id action scope index",
"add group mapping UID column to user_role table",
"add user_role org ID, user ID, role ID, group mapping UID index",
"remove user_role org ID, user ID, role ID index",
"create query_history table v1",
"add index query_history.org_id-created_by-datasource_uid",
"alter table query_history alter column created_by type to bigint",
"create query_history_details table v1",
"rbac disabled migrator",
"teams permissions migration",
"dashboard permissions",
"dashboard permissions uid scopes",
"drop managed folder create actions",
"alerting notification permissions",
"create query_history_star table v1",
"add index query_history.user_id-query_uid",
"add column org_id in query_history_star",
"alter table query_history_star_mig column user_id type to bigint",
"create correlation table v1",
"add index correlations.uid",
"add index correlations.source_uid",
"add correlation config column",
"drop index IDX_correlation_uid - v1",
"drop index IDX_correlation_source_uid - v1",
"Rename table correlation to correlation_tmp_qwerty - v1",
"create correlation v2",
"create index IDX_correlation_uid - v2",
"create index IDX_correlation_source_uid - v2",
"create index IDX_correlation_org_id - v2",
"copy correlation v1 to v2",
"drop correlation_tmp_qwerty",
"add provisioning column",
"add type column",
"create entity_events table",
"create dashboard public config v1",
"drop index UQE_dashboard_public_config_uid - v1",
"drop index IDX_dashboard_public_config_org_id_dashboard_uid - v1",
"Drop old dashboard public config table",
"recreate dashboard public config v1",
"create index UQE_dashboard_public_config_uid - v1",
"create index IDX_dashboard_public_config_org_id_dashboard_uid - v1",
"drop index UQE_dashboard_public_config_uid - v2",
"drop index IDX_dashboard_public_config_org_id_dashboard_uid - v2",
"Drop public config table",
"Recreate dashboard public config v2",
"create index UQE_dashboard_public_config_uid - v2",
"create index IDX_dashboard_public_config_org_id_dashboard_uid - v2",
"create index UQE_dashboard_public_config_access_token - v2",
"Rename table dashboard_public_config to dashboard_public - v2",
"add annotations_enabled column",
"add time_selection_enabled column",
"delete orphaned public dashboards",
"add share column",
"backfill empty share column fields with default of public",
"create file table",
"file table idx: path natural pk",
"file table idx: parent_folder_path_hash fast folder retrieval",
"create file_meta table",
"file table idx: path key",
"set path collation in file table",
"migrate contents column to mediumblob for MySQL",
"managed permissions migration",
"managed folder permissions alert actions migration",
"RBAC action name migrator",
"Add UID column to playlist",
"Update uid column values in playlist",
"Add index for uid in playlist",
"update group index for alert rules",
"managed folder permissions alert actions repeated migration",
"admin only folder/dashboard permission",
"add action column to seed_assignment",
"add scope column to seed_assignment",
"remove unique index builtin_role_role_name before nullable update",
"update seed_assignment role_name column to nullable",
"add unique index builtin_role_name back",
"add unique index builtin_role_action_scope",
"add primary key to seed_assigment",
"add origin column to seed_assignment",
"add origin to plugin seed_assignment",
"prevent seeding OnCall access",
"managed folder permissions alert actions repeated fixed migration",
"managed folder permissions library panel actions migration",
"migrate external alertmanagers to datsourcse",
"create folder table",
"Add index for parent_uid",
"Add unique index for folder.uid and folder.org_id",
"Update folder title length",
"Add unique index for folder.title and folder.parent_uid",
"Remove unique index for folder.title and folder.parent_uid",
"Add unique index for title, parent_uid, and org_id",
"Sync dashboard and folder table",
"Remove ghost folders from the folder table",
"Remove unique index UQE_folder_uid_org_id",
"Add unique index UQE_folder_org_id_uid",
"Remove unique index UQE_folder_title_parent_uid_org_id",
"Add unique index UQE_folder_org_id_parent_uid_title",
"Remove index IDX_folder_parent_uid_org_id",
"Remove unique index UQE_folder_org_id_parent_uid_title",
"create anon_device table",
"add unique index anon_device.device_id",
"add index anon_device.updated_at",
"create signing_key table",
"add unique index signing_key.key_id",
"set legacy alert migration status in kvstore",
"migrate record of created folders during legacy migration to kvstore",
"Add folder_uid for dashboard",
"Populate dashboard folder_uid column",
"Add unique index for dashboard_org_id_folder_uid_title",
"Delete unique index for dashboard_org_id_folder_id_title",
"Delete unique index for dashboard_org_id_folder_uid_title",
"Add unique index for dashboard_org_id_folder_uid_title_is_folder",
"Restore index for dashboard_org_id_folder_id_title",
"Remove unique index for dashboard_org_id_folder_uid_title_is_folder",
"create sso_setting table",
"copy kvstore migration status to each org",
"add back entry for orgid=0 migrated status",
"managed dashboard permissions annotation actions migration",
"create cloud_migration table v1",
"create cloud_migration_run table v1",
"add stack_id column",
"add region_slug column",
"add cluster_slug column",
"add migration uid column",
"Update uid column values for migration",
"Add unique index migration_uid",
"add migration run uid column",
"Update uid column values for migration run",
"Add unique index migration_run_uid",
"Rename table cloud_migration to cloud_migration_session_tmp_qwerty - v1",
"create cloud_migration_session v2",
"create index UQE_cloud_migration_session_uid - v2",
"copy cloud_migration_session v1 to v2",
"drop cloud_migration_session_tmp_qwerty",
"Rename table cloud_migration_run to cloud_migration_snapshot_tmp_qwerty - v1",
"create cloud_migration_snapshot v2",
"create index UQE_cloud_migration_snapshot_uid - v2",
"copy cloud_migration_snapshot v1 to v2",
"drop cloud_migration_snapshot_tmp_qwerty",
"add snapshot upload_url column",
"add snapshot status column",
"add snapshot local_directory column",
"add snapshot gms_snapshot_uid column",
"add snapshot encryption_key column",
"add snapshot error_string column",
"create cloud_migration_resource table v1",
"delete cloud_migration_snapshot.result column",
"add cloud_migration_resource.name column",
"add cloud_migration_resource.parent_name column",
"add cloud_migration_session.org_id column",
"add cloud_migration_resource.error_code column",
"increase resource_uid column length",
"alter kv_store.value to longtext",
"add notification_settings column to alert_rule table",
"add notification_settings column to alert_rule_version table",
"removing scope from alert.instances:read action migration",
"managed folder permissions alerting silences actions migration",
"add record column to alert_rule table",
"add record column to alert_rule_version table",
"add resolved_at column to alert_instance table",
"add last_sent_at column to alert_instance table",
"Enable traceQL streaming for all Tempo datasources",
"Add scope to alert.notifications.receivers:read and alert.notifications.receivers.secrets:read",
"add metadata column to alert_rule table",
"add metadata column to alert_rule_version table",
"delete orphaned service account permissions",
"adding action set permissions",
"create user_external_session table",
"increase name_id column length to 1024",
"increase session_id column length to 1024",
"remove scope from alert.notifications.receivers:create",
"add created_by column to alert_rule_version table",
"add updated_by column to alert_rule table",
"add alert_rule_state table",
"add index to alert_rule_state on org_id and rule_uid columns",
"add guid column to alert_rule table",
"add rule_guid column to alert_rule_version table",
"drop index in alert_rule_version table on rule_org_id, rule_uid and version columns",
"populate rule guid in alert rule table",
"add index in alert_rule_version table on rule_org_id, rule_uid, rule_guid and version columns",
"add index in alert_rule_version table on rule_guid and version columns",
"add index in alert_rule table on guid columns",
"add missing_series_evals_to_resolve column to alert_rule",
"add missing_series_evals_to_resolve column to alert_rule_version",
"create data_source_usage_by_day table",
"create data_source_usage_by_day(data_source_id) index",
"create data_source_usage_by_day(data_source_id, day) unique index",
"create dashboard_usage_by_day table",
"create dashboard_usage_sums table",
"create dashboard_usage_by_day(dashboard_id) index",
"create dashboard_usage_by_day(dashboard_id, day) index",
"add column errors_last_1_days to dashboard_usage_sums",
"add column errors_last_7_days to dashboard_usage_sums",
"add column errors_last_30_days to dashboard_usage_sums",
"add column errors_total to dashboard_usage_sums",
"create dashboard_public_usage_by_day table",
"add column cached_queries to dashboard_usage_by_day table",
"add column cached_queries to dashboard_public_usage_by_day table",
"add column dashboard_uid to dashboard_usage_sums",
"add column org_id to dashboard_usage_sums",
"add column dashboard_uid to dashboard_usage_by_day",
"add column org_id to dashboard_usage_by_day",
"create dashboard_usage_by_day(dashboard_uid, org_id, day) unique index",
"Add missing dashboard_uid and org_id to dashboard_usage_by_day and dashboard_usage_sums",
"Add dashboard_usage_sums(org_id, dashboard_uid) index",
"create user_dashboard_views table",
"add index user_dashboard_views.user_id",
"add index user_dashboard_views.dashboard_id",
"add unique index user_dashboard_views_user_id_dashboard_id",
"add org_id column to user_dashboard_views",
"add dashboard_uid column to user_dashboard_views",
"add unique index user_dashboard_views_org_id_dashboard_uid",
"add unique index user_dashboard_views_org_id_user_id_dashboard_uid",
"populate user_dashboard_views.dashboard_uid and user_dashboard_views.org_id from dashboard table",
"create user_stats table",
"add unique index user_stats(user_id)",
"create data_source_cache table",
"add index data_source_cache.data_source_id",
"add use_default_ttl column",
"add data_source_cache.data_source_uid column",
"remove abandoned data_source_cache records",
"update data_source_cache.data_source_uid value",
"add index data_source_cache.data_source_uid",
"add data_source_cache.ttl_resources_ms column",
"update data_source_cache.ttl_resources_ms to have the same value as ttl_ms",
"create data_source_acl table",
"add index data_source_acl.data_source_id",
"add unique index datasource_acl.unique",
"create license_token table",
"drop recorded_queries table v14",
"drop recording_rules table v14",
"create recording_rules table v14",
"create remote_write_targets table v1",
"Add prom_name to recording_rules table",
"ensure remote_write_targets table",
"create report config table v1",
"Add index report.user_id",
"add index to dashboard_id",
"add index to org_id",
"Add timezone to the report",
"Add time_from to the report",
"Add time_to to the report",
"Add PDF landscape option to the report",
"Add monthly day scheduling option to the report",
"Add PDF layout option to the report",
"Add PDF orientation option to the report",
"Update report pdf_orientation from pdf_landscape",
"create report settings table",
"Add dashboard_uid field to the report",
"Add template_vars field to the report",
"Add option to include dashboard url in the report",
"Add state field to the report",
"Add option to add CSV files to the report",
"Add scheduling start date",
"Add missing schedule_start date for old reports",
"Add scheduling end date",
"Add schedulinng custom interval frequency",
"Add scheduling custom interval amount",
"Add workdays only flag to report",
"create report dashboards table",
"Add index report_dashboards.report_id",
"Migrate report fields into report_dashboards",
"Add formats option to the report",
"Migrate reports with csv enabled",
"Migrate ancient reports",
"Add created column in report_dashboards",
"Add scale_factor to the report",
"Alter scale_factor from TINYINT to SMALLINT",
"Add uid column to report",
"Add unique index reports_org_id_uid",
"Add pdf show template variable values to the report",
"Add pdf combine in one file",
"Add pdf theme to report settings table",
"Add email subject to the report",
"Populate email subject with report name",
"Add embedded image theme to report settings table",
"create team group table",
"add index team_group.org_id",
"add unique index team_group.org_id_team_id_group_id",
"add index team_group.group_id",
"create settings table",
"add unique index settings.section_key",
"add setting.encrypted_value",
"migrate role names",
"rename orgs roles",
"remove duplicated org role",
"migrate alerting role names",
"data source permissions",
"data source uid permissions",
"rename permissions:delegate scope",
"remove invalid managed permissions",
"builtin role migration",
"seed permissions migration",
"managed permissions migration enterprise",
"create table dashboard_public_email_share",
"create table dashboard_public_magic_link",
"create table dashboard_public_session",
"add last_seen_at column"
]


@@ -8,6 +8,7 @@ import (
"github.com/grafana/grafana/pkg/util/osutil"
)
// nolint:unused
var migratedUnifiedResources = []string{
//"playlists.playlist.grafana.app",
"folders.folder.grafana.app",
@@ -58,14 +59,16 @@ func (cfg *Cfg) setUnifiedStorageConfig() {
// Set indexer config for unified storage
section := cfg.Raw.Section("unified_storage")
// TODO: Re-enable once migrations are ready and disabled on cloud
//cfg.DisableDataMigrations = section.Key("disable_data_migrations").MustBool(false)
cfg.DisableDataMigrations = true
cfg.DisableDataMigrations = section.Key("disable_data_migrations").MustBool(false)
if !cfg.DisableDataMigrations && cfg.getUnifiedStorageType() == "unified" {
cfg.enforceMigrationToUnifiedConfigs()
// Helper log to find instances running migrations in the future
cfg.Logger.Info("Unified migration configs not yet enforced")
//cfg.enforceMigrationToUnifiedConfigs() // TODO: uncomment when ready for release
} else {
cfg.EnableSearch = section.Key("enable_search").MustBool(false)
// Helper log to find instances disabling migration
cfg.Logger.Info("Unified migration configs enforcement disabled", "storage_type", cfg.getUnifiedStorageType(), "disable_data_migrations", cfg.DisableDataMigrations)
}
cfg.EnableSearch = section.Key("enable_search").MustBool(false)
cfg.MaxPageSizeBytes = section.Key("max_page_size_bytes").MustInt(0)
cfg.IndexPath = section.Key("index_path").String()
cfg.IndexWorkers = section.Key("index_workers").MustInt(10)
@@ -102,6 +105,7 @@ func (cfg *Cfg) setUnifiedStorageConfig() {
cfg.MinFileIndexBuildVersion = section.Key("min_file_index_build_version").MustString("")
}
// nolint:unused
// enforceMigrationToUnifiedConfigs enforces configurations required to run migrated resources in mode 5
// All migrated resources in MigratedUnifiedResources are set to mode 5 and unified search is enabled
func (cfg *Cfg) enforceMigrationToUnifiedConfigs() {


@@ -8,6 +8,7 @@ import (
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/kvstore"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/metrics"
sqlstoremigrator "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/migrations/contract"
@@ -55,8 +56,12 @@ func (p *UnifiedStorageMigrationServiceImpl) Run(ctx context.Context) error {
// skip migrations if disabled in config
if p.cfg.DisableDataMigrations {
metrics.MUnifiedStorageMigrationStatus.Set(1)
logger.Info("Data migrations are disabled, skipping")
return nil
} else {
metrics.MUnifiedStorageMigrationStatus.Set(2)
logger.Info("Data migrations not yet enforced, skipping")
}
// TODO: Re-enable once migrations are ready


@@ -4,7 +4,13 @@ import { useEffect, useMemo } from 'react';
import { AppEvents } from '@grafana/data';
import { t } from '@grafana/i18n';
import { config, getAppEvents } from '@grafana/runtime';
import { DisplayList, iamAPIv0alpha1, useLazyGetDisplayMappingQuery } from 'app/api/clients/iam/v0alpha1';
import {
API_GROUP as IAM_API_GROUP,
API_VERSION as IAM_API_VERSION,
DisplayList,
iamAPIv0alpha1,
useLazyGetDisplayMappingQuery,
} from 'app/api/clients/iam/v0alpha1';
import { useAppNotification } from 'app/core/copy/appNotification';
import {
useDeleteFolderMutation as useDeleteFolderMutationLegacy,
@@ -56,6 +62,8 @@ import {
ReplaceFolderApiArg,
useGetAffectedItemsQuery,
FolderInfo,
ObjectMeta,
OwnerReference,
} from './index';
function getFolderUrl(uid: string, title: string): string {
@@ -66,6 +74,10 @@ function getFolderUrl(uid: string, title: string): string {
return `${config.appSubUrl}/dashboards/f/${uid}/${slug}`;
}
type CombinedFolder = FolderDTO & {
ownerReferences?: OwnerReference[];
};
const combineFolderResponses = (
folder: Folder,
legacyFolder: FolderDTO,
@@ -75,7 +87,7 @@ const combineFolderResponses = (
const updatedBy = folder.metadata.annotations?.[AnnoKeyUpdatedBy];
const createdBy = folder.metadata.annotations?.[AnnoKeyCreatedBy];
const newData: FolderDTO = {
const newData: CombinedFolder = {
canAdmin: legacyFolder.canAdmin,
canDelete: legacyFolder.canDelete,
canEdit: legacyFolder.canEdit,
@@ -84,6 +96,7 @@ const combineFolderResponses = (
createdBy: (createdBy && userDisplay?.display[userDisplay?.keys.indexOf(createdBy)]?.displayName) || 'Anonymous',
updatedBy: (updatedBy && userDisplay?.display[userDisplay?.keys.indexOf(updatedBy)]?.displayName) || 'Anonymous',
...appPlatformFolderToLegacyFolder(folder),
ownerReferences: folder.metadata.ownerReferences || [],
};
if (parents.length) {
@@ -101,7 +114,7 @@ const combineFolderResponses = (
return newData;
};
export async function getFolderByUidFacade(uid: string): Promise<FolderDTO> {
export async function getFolderByUidFacade(uid: string) {
const isVirtualFolder = uid && [GENERAL_FOLDER_UID, config.sharedWithMeFolderUID].includes(uid);
const shouldUseAppPlatformAPI = Boolean(config.featureToggles.foldersAppPlatformAPI);
@@ -216,7 +229,7 @@ export function useGetFolderQueryFacade(uid?: string) {
// Stitch together the responses to create a single FolderDTO object so on the outside this behaves as the legacy
// api client.
let newData: FolderDTO | undefined = undefined;
let newData: CombinedFolder | undefined = undefined;
if (
resultFolder.data &&
resultParents.data &&
@@ -359,14 +372,36 @@ export function useCreateFolder() {
return legacyHook;
}
const createFolderAppPlatform = async (folder: NewFolder) => {
const payload: CreateFolderApiArg = {
const createFolderAppPlatform = async (payload: NewFolder & { createAsTeamFolder?: boolean; teamUid?: string }) => {
const { createAsTeamFolder, teamUid, ...folder } = payload;
const slugifiedTitle = kbn.slugifyForUrl(folder.title);
const metadataName = `team-${slugifiedTitle}`;
const partialMetadata: ObjectMeta =
createAsTeamFolder && teamUid
? {
name: metadataName,
ownerReferences: [
{
apiVersion: `${IAM_API_GROUP}/${IAM_API_VERSION}`,
kind: 'Team',
name: folder.title,
uid: teamUid,
controller: true,
blockOwnerDeletion: false,
},
],
}
: { generateName: 'f' };
const apiPayload: CreateFolderApiArg = {
folder: {
spec: {
title: folder.title,
description: 'Testing a description',
},
metadata: {
generateName: 'f',
...partialMetadata,
annotations: {
...(folder.parentUid && { [AnnoKeyFolder]: folder.parentUid }),
},
@@ -375,7 +410,7 @@ export function useCreateFolder() {
},
};
const result = await createFolder(payload);
const result = await createFolder(apiPayload);
refresh({ childrenOf: folder.parentUid });
deletedDashboardsCache.clear();
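
For reference, the payload assembled by the new createFolderAppPlatform path when createAsTeamFolder is set would look roughly like the sketch below. This is a minimal illustration pieced together from the diff above; the title, uids, and parent uid are placeholders, and the hardcoded description is the placeholder still present in the WIP diff itself.

const exampleArg: CreateFolderApiArg = {
  folder: {
    spec: {
      title: 'Payments',
      description: 'Testing a description', // placeholder carried over from the diff
    },
    metadata: {
      name: 'team-payments', // "team-" + slugified title
      ownerReferences: [
        {
          apiVersion: `${IAM_API_GROUP}/${IAM_API_VERSION}`,
          kind: 'Team',
          name: 'Payments', // the folder title, per the diff
          uid: 'team-uid-123', // placeholder team uid
          controller: true,
          blockOwnerDeletion: false,
        },
      ],
      annotations: {
        [AnnoKeyFolder]: 'parent-folder-uid', // only set when folder.parentUid is present
      },
    },
  },
};

Without createAsTeamFolder, metadata falls back to { generateName: 'f' } and no ownerReferences are attached.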


@@ -0,0 +1,86 @@
/* eslint-disable @grafana/i18n/no-untranslated-strings */
import { Fragment, useState } from 'react';
import { Box, Button, Combobox, ComboboxOption, Divider, Stack, Text } from '@grafana/ui';
import { OwnerReference } from 'app/api/clients/folder/v1beta1';
import { useListTeamQuery, API_GROUP, API_VERSION } from 'app/api/clients/iam/v0alpha1';
import { useDispatch } from 'app/types/store';
import { TeamOwnerReference } from './OwnerReference';
import { SupportedResource, useAddOwnerReference, useGetOwnerReferences } from './hooks';
const TeamSelector = ({ onChange }: { onChange: (ownerRef: OwnerReference) => void }) => {
const { data: teams } = useListTeamQuery({});
const teamsOptions = teams?.items.map((team) => ({
label: team.spec.title,
value: team.metadata.name!,
}));
return (
<Combobox
options={teamsOptions}
onChange={(team: ComboboxOption<string>) => {
onChange({
apiVersion: `${API_GROUP}/${API_VERSION}`,
kind: 'Team',
name: team.label,
uid: team.value,
});
}}
/>
);
};
export const ManageOwnerReferences = ({
resource,
resourceId,
}: {
resource: SupportedResource;
resourceId: string;
}) => {
const dispatch = useDispatch();
const [addingNewReference, setAddingNewReference] = useState(false);
const [pendingReference, setPendingReference] = useState<OwnerReference | null>(null);
const ownerReferences = useGetOwnerReferences({ resource, resourceId });
const [trigger, result] = useAddOwnerReference({ resource, resourceId });
const addOwnerReference = (ownerReference: OwnerReference) => {
trigger(ownerReference);
};
return (
<Stack direction="column">
<Text variant="h3">Owned by:</Text>
<Box>
{ownerReferences
.filter((ownerReference) => ownerReference.kind === 'Team')
.map((ownerReference) => (
<Fragment key={ownerReference.uid}>
<TeamOwnerReference ownerReference={ownerReference} />
<Divider />
</Fragment>
))}
</Box>
<Box>
{addingNewReference && (
<Box paddingBottom={2}>
<Text variant="h3">Add new owner reference:</Text>
<TeamSelector
onChange={(ownerReference) => {
setPendingReference(ownerReference);
}}
/>
<Button
onClick={() => {
if (pendingReference) {
addOwnerReference(pendingReference);
}
}}
>
Save
</Button>
<Divider />
</Box>
)}
<Button onClick={() => setAddingNewReference(true)}>Add new owner reference</Button>
</Box>
</Stack>
);
};
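
A minimal usage sketch for the component above, assuming a spot in the UI where a folder uid is at hand (the uid is a placeholder, not from the diff):

<ManageOwnerReferences resource="Folder" resourceId="fd123abc" />

'Folder' is currently the only SupportedResource, since getReferencesEndpointMap in the hooks file further down maps only that kind.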


@@ -0,0 +1,37 @@
import { OwnerReference } from '@grafana/api-clients/rtkq/folder/v1beta1';
import { useGetTeamMembersQuery } from '@grafana/api-clients/rtkq/iam/v0alpha1';
import { Stack, Text, Avatar, Link, Tooltip } from '@grafana/ui';
export const getGravatarUrl = (text: string) => {
// todo
return `avatar/bd38b9ecaf6169ca02b848f60a44cb95`;
};
export const TeamOwnerReference = ({ ownerReference }: { ownerReference: OwnerReference }) => {
const { data: teamMembers } = useGetTeamMembersQuery({ name: ownerReference.uid });
const avatarURL = getGravatarUrl(ownerReference.name);
const membersTooltip = (
<>
<Stack gap={1} direction="column">
<Text>Team members:</Text>
{teamMembers?.items?.map((member) => (
<div key={member.identity.name}>
<Avatar src={member.avatarURL} /> {member.displayName}
</div>
))}
</Stack>
</>
);
return (
<Link href={`/org/teams/edit/${ownerReference.uid}/members`} key={ownerReference.uid}>
<Tooltip content={membersTooltip}>
<Stack gap={1} alignItems="center">
<Avatar src={avatarURL} alt={ownerReference.name} /> {ownerReference.name}
</Stack>
</Tooltip>
</Link>
);
};


@@ -0,0 +1,55 @@
import { useState, useEffect } from 'react';
import { useReplaceFolderMutation } from '@grafana/api-clients/rtkq/folder/v1beta1';
import { folderAPIv1beta1, OwnerReference } from 'app/api/clients/folder/v1beta1';
import { useDispatch } from 'app/types/store';
const getReferencesEndpointMap = {
Folder: (resourceId: string) => folderAPIv1beta1.endpoints.getFolder.initiate({ name: resourceId }),
} as const;
export type SupportedResource = keyof typeof getReferencesEndpointMap;
export const useGetOwnerReferences = ({
resource,
resourceId,
}: {
resource: SupportedResource;
resourceId: string;
}) => {
const [ownerReferences, setOwnerReferences] = useState<OwnerReference[]>([]);
const dispatch = useDispatch();
const endpointAction = getReferencesEndpointMap[resource];
useEffect(() => {
dispatch(endpointAction(resourceId)).then(({ data }) => {
if (data?.metadata?.ownerReferences) {
setOwnerReferences(data.metadata.ownerReferences);
}
});
}, [dispatch, endpointAction, resourceId]);
return ownerReferences;
};
export const useAddOwnerReference = ({ resource, resourceId }: { resource: SupportedResource; resourceId: string }) => {
const [replaceFolder, result] = useReplaceFolderMutation();
return [
(ownerReference: OwnerReference) =>
replaceFolder({
name: resourceId,
folder: {
status: {},
metadata: {
name: resourceId,
ownerReferences: [ownerReference],
},
spec: {
title: resourceId,
},
},
}),
result,
] as const;
};
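
Taken together, these hooks form the read and write paths used by ManageOwnerReferences above. A rough composition sketch, with placeholder values that are not part of the diff:

const refs = useGetOwnerReferences({ resource: 'Folder', resourceId: 'fd123abc' });
const [addOwnerRef] = useAddOwnerReference({ resource: 'Folder', resourceId: 'fd123abc' });

addOwnerRef({
  apiVersion: 'iam.grafana.app/v0alpha1', // assumed to match the IAM API_GROUP/API_VERSION
  kind: 'Team',
  name: 'Payments',
  uid: 'team-uid-123',
});

Note that useAddOwnerReference issues a replaceFolder call whose metadata carries only the single new reference and whose spec.title is set to resourceId, so as written it replaces any existing ownerReferences (and the folder title) rather than appending to them.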


@@ -5,10 +5,12 @@ import { SelectableValue } from '@grafana/data';
import { t } from '@grafana/i18n';
import { Combobox, ComboboxOption, Field, useStyles2 } from '@grafana/ui';
export type AsyncOptionsLoader = (inputValue: string) => Promise<Array<ComboboxOption<string>>>;
export interface AlertLabelDropdownProps {
onChange: (newValue: SelectableValue<string>) => void;
onOpenMenu?: () => void;
options: ComboboxOption[];
options: ComboboxOption[] | AsyncOptionsLoader;
defaultValue?: SelectableValue;
type: 'key' | 'value';
isLoading?: boolean;
@@ -38,7 +40,7 @@ const AlertLabelDropdown: FC<AlertLabelDropdownProps> = forwardRef<HTMLDivElemen
return (
<div ref={ref}>
<Field disabled={false} data-testid={`alertlabel-${type}-picker`} className={styles.resetMargin}>
<Field noMargin disabled={false} data-testid={`alertlabel-${type}-picker`} className={styles.resetMargin}>
<Combobox<string>
placeholder={t('alerting.alert-label-dropdown.placeholder-select', 'Choose {{type}}', { type })}
width={25}
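
With this change, options accepts either a static ComboboxOption[] or an AsyncOptionsLoader that Combobox invokes when the menu opens. A minimal loader sketch; fetchSeverityValues is hypothetical, standing in for whatever fetch the caller owns:

const loadSeverities: AsyncOptionsLoader = async (inputValue) => {
  const values = await fetchSeverityValues(); // hypothetical fetch returning string[]
  return values
    .filter((v) => v.toLowerCase().includes(inputValue.toLowerCase()))
    .map((v) => ({ label: v, value: v }));
};

// <AlertLabelDropdown type="value" options={loadSeverities} onChange={handleChange} />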


@@ -2,7 +2,7 @@ import { useMemo } from 'react';
import { OpenAssistantProps, createAssistantContextItem, useAssistant } from '@grafana/assistant';
import { t } from '@grafana/i18n';
import { reportInteraction } from '@grafana/runtime';
import { config, reportInteraction } from '@grafana/runtime';
import { Menu } from '@grafana/ui';
import { GrafanaAlertingRule, GrafanaRecordingRule, GrafanaRule } from 'app/types/unified-alerting';
@@ -98,8 +98,16 @@ function buildAnalyzeRulePrompt(rule: GrafanaRule): string {
function buildAnalyzeAlertingRulePrompt(rule: GrafanaAlertingRule): string {
const state = rule.state || 'firing';
const timeInfo = rule.activeAt ? ` starting at ${new Date(rule.activeAt).toISOString()}` : '';
const alertsNavigationPrompt = config.featureToggles.alertingTriage
? '\n- Include navigation to follow up on the alerts page'
: '';
let prompt = `Analyze the ${state} alert "${rule.name}"${timeInfo}.`;
let prompt = `
Analyze the ${state} alert "${rule.name} (uid: ${rule.uid})"${timeInfo}.
- Get the rule definition, read the queries and run them to understand the rule
- Get the rule state and instances to understand its current state
- Read the rule conditions and understand how it works. Then suggest query and conditions improvements if applicable${alertsNavigationPrompt}
`;
const description = rule.annotations?.description || rule.annotations?.summary || '';
if (description) {
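
Illustrative output of the template above (not from the diff): for a firing rule named "High CPU" with uid "abc123", an activeAt of 2025-12-02T10:00:00Z, and the alertingTriage feature toggle enabled, the prompt begins:

  Analyze the firing alert "High CPU (uid: abc123)" starting at 2025-12-02T10:00:00.000Z.
  - Get the rule definition, read the queries and run them to understand the rule
  - Get the rule state and instances to understand its current state
  - Read the rule conditions and understand how it works. Then suggest query and conditions improvements if applicable
  - Include navigation to follow up on the alerts page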


@@ -1,5 +1,5 @@
import { css, cx } from '@emotion/css';
import { FC, useCallback, useMemo, useState } from 'react';
import { FC, useCallback, useMemo } from 'react';
import { Controller, FormProvider, useFieldArray, useForm, useFormContext } from 'react-hook-form';
import { AlertLabels } from '@grafana/alerting/unstable';
@@ -13,7 +13,7 @@ import { SupportedPlugin } from '../../../types/pluginBridges';
import { KBObjectArray, RuleFormType, RuleFormValues } from '../../../types/rule-form';
import { isPrivateLabelKey } from '../../../utils/labels';
import { isRecordingRuleByType } from '../../../utils/rules';
import AlertLabelDropdown from '../../AlertLabelDropdown';
import AlertLabelDropdown, { AsyncOptionsLoader } from '../../AlertLabelDropdown';
import { NeedHelpInfo } from '../NeedHelpInfo';
import { useGetLabelsFromDataSourceName } from '../useAlertRuleSuggestions';
@@ -117,8 +117,7 @@ export function useCombinedLabels(
dataSourceName: string,
labelsPluginInstalled: boolean,
loadingLabelsPlugin: boolean,
labelsInSubform: Array<{ key: string; value: string }>,
selectedKey: string
labelsInSubform: Array<{ key: string; value: string }>
) {
// ------- Get labels keys and their values from existing alerts
const { labels: labelsByKeyFromExisingAlerts, isLoading } = useGetLabelsFromDataSourceName(dataSourceName);
@@ -126,18 +125,19 @@ export function useCombinedLabels(
const { loading: isLoadingLabels, labelsOpsKeys = [] } = useGetOpsLabelsKeys(
!labelsPluginInstalled || loadingLabelsPlugin
);
//------ Convert the labelsOpsKeys to the same format as the labelsByKeyFromExisingAlerts
const labelsByKeyOps = useMemo(() => {
return labelsOpsKeys.reduce((acc: Record<string, Set<string>>, label) => {
acc[label.name] = new Set();
return acc;
}, {});
// Lazy query for fetching label values on demand
const [fetchLabelValues] = labelsApi.endpoints.getLabelValues.useLazyQuery();
//------ Convert the labelsOpsKeys to a Set for quick lookup
const opsLabelKeysSet = useMemo(() => {
return new Set(labelsOpsKeys.map((label) => label.name));
}, [labelsOpsKeys]);
//------- Convert the keys from the ops labels to options for the dropdown
const keysFromGopsLabels = useMemo(() => {
return mapLabelsToOptions(Object.keys(labelsByKeyOps).filter(isKeyAllowed), labelsInSubform);
}, [labelsByKeyOps, labelsInSubform]);
return mapLabelsToOptions(Array.from(opsLabelKeysSet).filter(isKeyAllowed), labelsInSubform);
}, [opsLabelKeysSet, labelsInSubform]);
//------- Convert the keys from the existing alerts to options for the dropdown
const keysFromExistingAlerts = useMemo(() => {
@@ -158,70 +158,47 @@ export function useCombinedLabels(
},
];
const selectedKeyIsFromAlerts = labelsByKeyFromExisingAlerts.has(selectedKey);
const selectedKeyIsFromOps = labelsByKeyOps[selectedKey] !== undefined && labelsByKeyOps[selectedKey]?.size > 0;
const selectedKeyDoesNotExist = !selectedKeyIsFromAlerts && !selectedKeyIsFromOps;
// Create an async options loader for a specific key
// This is called by Combobox when the dropdown menu opens
const createAsyncValuesLoader = useCallback(
(key: string): AsyncOptionsLoader => {
return async (_inputValue: string): Promise<Array<ComboboxOption<string>>> => {
if (!isKeyAllowed(key) || !key) {
return [];
}
const valuesAlreadyFetched = !selectedKeyIsFromAlerts && labelsByKeyOps[selectedKey]?.size > 0;
// Collect values from existing alerts first
const valuesFromAlerts = labelsByKeyFromExisingAlerts.get(key);
const existingValues = valuesFromAlerts ? Array.from(valuesFromAlerts) : [];
// Only fetch the values for the selected key if it is from ops and the values are not already fetched (the selected key is not in the labelsByKeyOps object)
const {
currentData: valuesData,
isLoading: isLoadingValues = false,
error,
} = labelsApi.endpoints.getLabelValues.useQuery(
{ key: selectedKey },
{
skip:
!labelsPluginInstalled ||
!selectedKey ||
selectedKeyIsFromAlerts ||
valuesAlreadyFetched ||
selectedKeyDoesNotExist,
}
);
// Collect values from ops labels (if plugin is installed)
let opsValues: string[] = [];
if (labelsPluginInstalled && opsLabelKeysSet.has(key)) {
try {
// RTK Query handles caching automatically
const result = await fetchLabelValues({ key }, true).unwrap();
if (result?.values?.length) {
opsValues = result.values.map((value) => value.name);
}
} catch (error) {
console.error('Failed to fetch label values for key:', key, error);
}
}
// these are the values for the selected key in case it is from ops
const valuesFromSelectedGopsKey = useMemo(() => {
// if it is from alerts, we need to fetch the values from the existing alerts
if (selectedKeyIsFromAlerts) {
return [];
}
// in case of a label from ops, we need to fetch the values from the plugin
// fetch values from ops only if there is no value for the key
const valuesForSelectedKey = labelsByKeyOps[selectedKey];
const valuesAlreadyFetched = valuesForSelectedKey?.size > 0;
if (valuesAlreadyFetched) {
return mapLabelsToOptions(valuesForSelectedKey);
}
if (!isLoadingValues && valuesData?.values?.length && !error) {
const values = valuesData?.values.map((value) => value.name);
labelsByKeyOps[selectedKey] = new Set(values);
return mapLabelsToOptions(values);
}
return [];
}, [selectedKeyIsFromAlerts, labelsByKeyOps, selectedKey, isLoadingValues, valuesData, error]);
// Combine: existing values first, then unique ops values (Set preserves first occurrence)
const combinedValues = [...new Set([...existingValues, ...opsValues])];
const getValuesForLabel = useCallback(
(key: string) => {
if (!isKeyAllowed(key)) {
return [];
}
// values from existing alerts will take precedence over values from ops
if (selectedKeyIsFromAlerts || !labelsPluginInstalled) {
return mapLabelsToOptions(labelsByKeyFromExisingAlerts.get(key));
}
return valuesFromSelectedGopsKey;
return mapLabelsToOptions(combinedValues);
};
},
[labelsByKeyFromExisingAlerts, labelsPluginInstalled, valuesFromSelectedGopsKey, selectedKeyIsFromAlerts]
[labelsByKeyFromExisingAlerts, labelsPluginInstalled, opsLabelKeysSet, fetchLabelValues]
);
return {
loading: isLoading || isLoadingLabels,
keysFromExistingAlerts,
groupedOptions,
getValuesForLabel,
createAsyncValuesLoader,
};
}
@@ -248,30 +225,30 @@ export function LabelsWithSuggestions({ dataSourceName }: LabelsWithSuggestionsP
append({ key: '', value: '' });
}, [append]);
const [selectedKey, setSelectedKey] = useState('');
// check if the labels plugin is installed
const { installed: labelsPluginInstalled = false, loading: loadingLabelsPlugin } = usePluginBridge(
SupportedPlugin.Labels
);
const { loading, keysFromExistingAlerts, groupedOptions, getValuesForLabel } = useCombinedLabels(
const { loading, keysFromExistingAlerts, groupedOptions, createAsyncValuesLoader } = useCombinedLabels(
dataSourceName,
labelsPluginInstalled,
loadingLabelsPlugin,
labelsInSubform,
selectedKey
labelsInSubform
);
return (
<Stack direction="column" gap={2} alignItems="flex-start">
{fields.map((field, index) => {
// Get the values for this specific row's key directly without memoization
// Create an async loader for this specific row's key
// This will be called by Combobox when the dropdown opens
const currentKey = labelsInSubform[index]?.key || '';
const valuesForCurrentKey = getValuesForLabel(currentKey);
const asyncValuesLoader = createAsyncValuesLoader(currentKey);
return (
<div key={field.id} className={cx(styles.flexRow, styles.centerAlignRow)} id="hola">
<div key={field.id} className={cx(styles.flexRow, styles.centerAlignRow)}>
<Field
noMargin
className={styles.labelInput}
invalid={Boolean(errors.labelsInSubform?.[index]?.key?.message)}
error={errors.labelsInSubform?.[index]?.key?.message}
@@ -295,7 +272,6 @@ export function LabelsWithSuggestions({ dataSourceName }: LabelsWithSuggestionsP
onChange={(newValue: SelectableValue) => {
if (newValue) {
onChange(newValue.value || newValue.label || '');
setSelectedKey(newValue.value);
}
}}
type="key"
@@ -306,6 +282,7 @@ export function LabelsWithSuggestions({ dataSourceName }: LabelsWithSuggestionsP
</Field>
<InlineLabel className={styles.equalSign}>=</InlineLabel>
<Field
noMargin
className={styles.labelInput}
invalid={Boolean(errors.labelsInSubform?.[index]?.value?.message)}
error={errors.labelsInSubform?.[index]?.value?.message}
@@ -320,16 +297,13 @@ export function LabelsWithSuggestions({ dataSourceName }: LabelsWithSuggestionsP
<AlertLabelDropdown
{...rest}
defaultValue={value ? { label: value, value: value } : undefined}
options={valuesForCurrentKey}
options={asyncValuesLoader}
isLoading={loading}
onChange={(newValue: SelectableValue) => {
if (newValue) {
onChange(newValue.value || newValue.label || '');
}
}}
onOpenMenu={() => {
setSelectedKey(labelsInSubform[index].key);
}}
type="value"
/>
);
@@ -368,6 +342,7 @@ export const LabelsWithoutSuggestions: FC = () => {
<div key={field.id}>
<div className={cx(styles.flexRow, styles.centerAlignRow)} data-testid="alertlabel-input-wrapper">
<Field
noMargin
className={styles.labelInput}
invalid={!!errors.labels?.[index]?.key?.message}
error={errors.labels?.[index]?.key?.message}
@@ -386,6 +361,7 @@ export const LabelsWithoutSuggestions: FC = () => {
</Field>
<InlineLabel className={styles.equalSign}>=</InlineLabel>
<Field
noMargin
className={styles.labelInput}
invalid={!!errors.labels?.[index]?.value?.message}
error={errors.labels?.[index]?.value?.message}
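
The value merge in createAsyncValuesLoader above puts values from existing alerts first and then appends the ops values, deduplicating via a Set (which preserves first insertion order). A standalone illustration with sample data:

const existingValues = ['critical', 'warning'];
const opsValues = ['warning', 'info'];
const combinedValues = [...new Set([...existingValues, ...opsValues])];
// => ['critical', 'warning', 'info']; the duplicate 'warning' coming from ops is dropped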


@@ -0,0 +1,261 @@
import * as React from 'react';
import { FormProvider, useForm } from 'react-hook-form';
import { render, screen, waitFor, within } from 'test/test-utils';
import { clearPluginSettingsCache } from 'app/features/plugins/pluginSettings';
import { mockAlertRuleApi, setupMswServer } from '../../../mockApi';
import { getGrafanaRule } from '../../../mocks';
import {
defaultLabelValues,
getLabelValuesHandler,
getMockOpsLabels,
} from '../../../mocks/server/handlers/plugins/grafana-labels-app';
import { GRAFANA_RULES_SOURCE_NAME } from '../../../utils/datasource';
import { LabelsWithSuggestions } from './LabelsField';
// Existing labels in the form (simulating editing an existing alert rule with ops labels)
const existingOpsLabels = getMockOpsLabels();
const SubFormProviderWrapper = ({
children,
labels,
}: React.PropsWithChildren<{ labels: Array<{ key: string; value: string }> }>) => {
const methods = useForm({ defaultValues: { labelsInSubform: labels } });
return <FormProvider {...methods}>{children}</FormProvider>;
};
const grafanaRule = getGrafanaRule(undefined, {
uid: 'test-rule-uid',
title: 'test-alert',
namespace_uid: 'folderUID1',
data: [
{
refId: 'A',
datasourceUid: 'uid1',
queryType: 'alerting',
relativeTimeRange: { from: 1000, to: 2000 },
model: {
refId: 'A',
expression: 'vector(1)',
queryType: 'alerting',
datasource: { uid: 'uid1', type: 'prometheus' },
},
},
],
});
// Use the standard MSW server setup which includes all plugin handlers
const server = setupMswServer();
describe('LabelsField with ops labels', () => {
beforeEach(() => {
// Mock the ruler rules API
mockAlertRuleApi(server).rulerRules(GRAFANA_RULES_SOURCE_NAME, {
[grafanaRule.namespace.name]: [{ name: grafanaRule.group.name, interval: '1m', rules: [grafanaRule.rulerRule!] }],
});
});
afterEach(() => {
server.resetHandlers();
clearPluginSettingsCache();
});
async function renderLabelsWithOpsLabels(labels = existingOpsLabels) {
const view = render(
<SubFormProviderWrapper labels={labels}>
<LabelsWithSuggestions dataSourceName="grafana" />
</SubFormProviderWrapper>
);
// Wait for the dropdowns to be rendered
await waitFor(() => {
expect(screen.getAllByTestId('alertlabel-key-picker')).toHaveLength(labels.length);
});
return view;
}
it('should display existing ops labels correctly', async () => {
await renderLabelsWithOpsLabels();
// Verify the keys are displayed
expect(screen.getByTestId('labelsInSubform-key-0').querySelector('input')).toHaveValue(existingOpsLabels[0].key);
expect(screen.getByTestId('labelsInSubform-key-1').querySelector('input')).toHaveValue(existingOpsLabels[1].key);
// Verify the values are displayed
expect(screen.getByTestId('labelsInSubform-value-0').querySelector('input')).toHaveValue(
existingOpsLabels[0].value
);
expect(screen.getByTestId('labelsInSubform-value-1').querySelector('input')).toHaveValue(
existingOpsLabels[1].value
);
});
it('should render value dropdowns for each label', async () => {
await renderLabelsWithOpsLabels();
// Verify we have value pickers for each label
expect(screen.getAllByTestId('alertlabel-value-picker')).toHaveLength(2);
});
it('should allow deleting a label', async () => {
const { user } = await renderLabelsWithOpsLabels();
expect(screen.getAllByTestId('alertlabel-key-picker')).toHaveLength(2);
await user.click(screen.getByTestId('delete-label-1'));
expect(screen.getAllByTestId('alertlabel-key-picker')).toHaveLength(1);
expect(screen.getByTestId('labelsInSubform-key-0').querySelector('input')).toHaveValue(existingOpsLabels[0].key);
});
it('should allow adding a new label', async () => {
const { user } = await renderLabelsWithOpsLabels();
await waitFor(() => expect(screen.getByText('Add more')).toBeVisible());
await user.click(screen.getByText('Add more'));
expect(screen.getAllByTestId('alertlabel-key-picker')).toHaveLength(3);
expect(screen.getByTestId('labelsInSubform-key-2').querySelector('input')).toHaveValue('');
});
it('should allow typing custom values in dropdowns', async () => {
const { user } = await renderLabelsWithOpsLabels();
// Add a new label
await waitFor(() => expect(screen.getByText('Add more')).toBeVisible());
await user.click(screen.getByText('Add more'));
// Type a custom key and value
const newKeyInput = screen.getByTestId('labelsInSubform-key-2').querySelector('input');
const newValueInput = screen.getByTestId('labelsInSubform-value-2').querySelector('input');
await user.type(newKeyInput!, 'customKey{enter}');
await user.type(newValueInput!, 'customValue{enter}');
await waitFor(() => {
expect(screen.getByTestId('labelsInSubform-key-2').querySelector('input')).toHaveValue('customKey');
});
expect(screen.getByTestId('labelsInSubform-value-2').querySelector('input')).toHaveValue('customValue');
});
// When editing an existing alert with labels, the value dropdown should open and be interactive
it('should allow opening and interacting with existing label value dropdown', async () => {
const { user } = await renderLabelsWithOpsLabels();
// Click on the first label's value dropdown (sentMail) to open it
const firstValueDropdown = within(screen.getByTestId('labelsInSubform-value-0'));
const combobox = firstValueDropdown.getByRole('combobox');
// Verify initial value is set
expect(combobox).toHaveValue(existingOpsLabels[0].value);
// Open the dropdown
await user.click(combobox);
// Verify dropdown is open (not showing "No options found" state)
expect(combobox).toHaveAttribute('aria-expanded', 'true');
// Close and reopen to verify it remains interactive
await user.keyboard('{Escape}');
expect(combobox).toHaveAttribute('aria-expanded', 'false');
await user.click(combobox);
expect(combobox).toHaveAttribute('aria-expanded', 'true');
});
// Test that value dropdowns can be opened and interacted with for different label keys
// Note: Dropdown content cannot be verified via text due to Combobox virtualization in JSDOM
it('should allow opening value dropdowns for different label keys', async () => {
const { user } = await renderLabelsWithOpsLabels();
// Open the first label's value dropdown (sentMail)
const firstValueDropdown = within(screen.getByTestId('labelsInSubform-value-0'));
const firstCombobox = firstValueDropdown.getByRole('combobox');
await user.click(firstCombobox);
// Verify dropdown is open
expect(firstCombobox).toHaveAttribute('aria-expanded', 'true');
// Close and open second dropdown
await user.keyboard('{Escape}');
// Open the second label's value dropdown (stage)
const secondValueDropdown = within(screen.getByTestId('labelsInSubform-value-1'));
const secondCombobox = secondValueDropdown.getByRole('combobox');
await user.click(secondCombobox);
// Verify second dropdown is open
expect(secondCombobox).toHaveAttribute('aria-expanded', 'true');
});
// Test that after deleting and re-adding a label, the value dropdown can be opened
it('should allow opening value dropdown after deleting and re-adding a label', async () => {
const { user } = await renderLabelsWithOpsLabels();
// Delete the second label (stage)
await user.click(screen.getByTestId('delete-label-1'));
expect(screen.getAllByTestId('alertlabel-key-picker')).toHaveLength(1);
// Add a new label
await waitFor(() => expect(screen.getByText('Add more')).toBeVisible());
await user.click(screen.getByText('Add more'));
// Set the new label key to 'team'
const newKeyDropdown = within(screen.getByTestId('labelsInSubform-key-1'));
await user.type(newKeyDropdown.getByRole('combobox'), 'team{enter}');
// Verify the key was set
await waitFor(() => {
expect(screen.getByTestId('labelsInSubform-key-1').querySelector('input')).toHaveValue('team');
});
// Open the new label's value dropdown
const newValueDropdown = within(screen.getByTestId('labelsInSubform-value-1'));
const combobox = newValueDropdown.getByRole('combobox');
await user.click(combobox);
// Verify dropdown is open
expect(combobox).toHaveAttribute('aria-expanded', 'true');
});
// Test that opening the value dropdown requests values for the CORRECT label key
// This verifies the async loader is called with the right key
it('should request correct label values when opening value dropdown', async () => {
const requestedKeys: string[] = [];
// Add a spy handler that tracks which keys are requested
server.use(getLabelValuesHandler(defaultLabelValues, (key) => requestedKeys.push(key)));
const { user } = await renderLabelsWithOpsLabels();
// Open the first label's value dropdown (sentMail)
const firstValueDropdown = within(screen.getByTestId('labelsInSubform-value-0'));
await user.click(firstValueDropdown.getByRole('combobox'));
// Wait for the API call to be made
await waitFor(() => {
expect(requestedKeys).toContain('sentMail');
});
// Close dropdown
await user.keyboard('{Escape}');
// Clear the tracked keys
requestedKeys.length = 0;
// Open the second label's value dropdown (stage)
const secondValueDropdown = within(screen.getByTestId('labelsInSubform-value-1'));
await user.click(secondValueDropdown.getByRole('combobox'));
// Wait for the API call - should request 'stage', NOT 'sentMail'
await waitFor(() => {
expect(requestedKeys).toContain('stage');
});
// Verify we didn't request the wrong key (the bug from escalation #19378)
expect(requestedKeys).not.toContain('sentMail');
});
});
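
For context: the fix exercised above replaces a single shared selectedKey state with a loader created per row, so each value dropdown fetches options for its own key. A minimal sketch of that pattern, assuming a hypothetical fetch helper against the same plugin endpoint the MSW handlers further below stub out (the real component goes through the labels API client, not raw fetch):

type Option = { label: string; value: string };

// Hypothetical helper for illustration; response shape mirrors LabelKeyAndValues.
async function fetchLabelValues(key: string): Promise<string[]> {
  const res = await fetch(`/api/plugins/grafana-labels-app/resources/v1/labels/name/${key}`);
  const body: { values: Array<{ name: string }> } = await res.json();
  return body.values.map((v) => v.name);
}

// Each row builds its own loader bound to its current key, so opening the
// dropdown always requests values for that row's label (see the spy test above).
function createAsyncValuesLoader(key: string) {
  return async (): Promise<Option[]> => {
    if (!key) {
      return [];
    }
    const values = await fetchLabelValues(key);
    return values.map((v) => ({ label: v, value: v }));
  };
}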

View File

@@ -0,0 +1,140 @@
/**
* Unit tests for the createAsyncValuesLoader function in useCombinedLabels hook.
*
* These tests verify that:
* 1. Values from existing alerts are shown first
* 2. Values from ops labels are shown after existing values
* 3. Duplicate values between existing and ops are excluded from ops
* 4. The order is: existing values first, then unique ops values
*/
describe('createAsyncValuesLoader logic', () => {
// Simulate the data structures used in useCombinedLabels
const labelsByKeyFromExistingAlerts = new Map<string, Set<string>>([
['severity', new Set(['warning', 'error', 'critical'])],
['team', new Set(['frontend', 'backend', 'platform'])],
['environment', new Set(['production', 'staging'])],
]);
// Simulate ops labels (from grafana-labels-app plugin)
const opsLabelValues: Record<string, string[]> = {
severity: ['info', 'warning', 'critical', 'fatal'], // 'warning' and 'critical' overlap with existing
team: ['frontend', 'sre', 'devops'], // 'frontend' overlaps with existing
environment: ['production', 'staging', 'development', 'testing'], // 'production' and 'staging' overlap
cluster: ['us-east-1', 'us-west-2', 'eu-central-1'], // ops-only key
};
const opsLabelKeys = new Set(['severity', 'team', 'environment', 'cluster']);
const mapLabelsToOptions = (items: string[]) => {
return items.map((item) => ({ label: item, value: item }));
};
// This simulates the current implementation of createAsyncValuesLoader
const getValuesForLabel = (key: string, labelsPluginInstalled: boolean): Array<{ label: string; value: string }> => {
if (!key) {
return [];
}
// Collect values from existing alerts first
const valuesFromAlerts = labelsByKeyFromExistingAlerts.get(key);
const existingValues = valuesFromAlerts ? Array.from(valuesFromAlerts) : [];
// Collect values from ops labels (if plugin is installed)
let opsValues: string[] = [];
if (labelsPluginInstalled && opsLabelKeys.has(key)) {
opsValues = opsLabelValues[key] || [];
}
// Combine: existing values first, then unique ops values (Set preserves first occurrence)
const combinedValues = [...new Set([...existingValues, ...opsValues])];
return mapLabelsToOptions(combinedValues);
};
describe('when labels plugin is installed', () => {
it('should combine existing and ops values with existing first', () => {
const values = getValuesForLabel('severity', true);
// Existing: warning, error, critical
// Ops: info, warning, critical, fatal (warning and critical are duplicates)
// Expected: warning, error, critical, info, fatal
expect(values).toHaveLength(5);
expect(values.map((v) => v.value)).toEqual(['warning', 'error', 'critical', 'info', 'fatal']);
});
it('should exclude duplicate ops values that exist in existing alerts', () => {
const values = getValuesForLabel('environment', true);
// Existing: production, staging
// Ops: production, staging, development, testing (production and staging are duplicates)
// Expected: production, staging, development, testing
expect(values).toHaveLength(4);
expect(values.map((v) => v.value)).toEqual(['production', 'staging', 'development', 'testing']);
});
it('should return only ops values for ops-only keys', () => {
const values = getValuesForLabel('cluster', true);
// No existing alerts for 'cluster', only ops values
expect(values).toHaveLength(3);
expect(values.map((v) => v.value)).toEqual(['us-east-1', 'us-west-2', 'eu-central-1']);
});
it('should return only existing values for keys not in ops', () => {
// Add a key that exists in alerts but not in ops
labelsByKeyFromExistingAlerts.set('custom', new Set(['value1', 'value2']));
const values = getValuesForLabel('custom', true);
expect(values).toHaveLength(2);
expect(values.map((v) => v.value)).toEqual(['value1', 'value2']);
// Cleanup
labelsByKeyFromExistingAlerts.delete('custom');
});
});
describe('when labels plugin is NOT installed', () => {
it('should return only existing alert values', () => {
const values = getValuesForLabel('severity', false);
// Only existing values, no ops values
expect(values).toHaveLength(3);
expect(values.map((v) => v.value)).toEqual(['warning', 'error', 'critical']);
});
it('should return empty array for ops-only keys', () => {
const values = getValuesForLabel('cluster', false);
// 'cluster' only exists in ops, not in existing alerts
expect(values).toHaveLength(0);
});
});
describe('edge cases', () => {
it('should return empty array for empty key', () => {
const values = getValuesForLabel('', true);
expect(values).toHaveLength(0);
});
it('should return empty array for unknown keys', () => {
const values = getValuesForLabel('unknown-key', true);
expect(values).toHaveLength(0);
});
it('should preserve order: existing values first, then unique ops values', () => {
const values = getValuesForLabel('team', true);
// Existing: frontend, backend, platform
// Ops: frontend, sre, devops (frontend is duplicate)
// Expected order: frontend, backend, platform, sre, devops
const valueStrings = values.map((v) => v.value);
// Check that existing values come before ops values
expect(valueStrings.indexOf('frontend')).toBeLessThan(valueStrings.indexOf('sre'));
expect(valueStrings.indexOf('backend')).toBeLessThan(valueStrings.indexOf('sre'));
expect(valueStrings.indexOf('platform')).toBeLessThan(valueStrings.indexOf('devops'));
});
});
});

View File

@@ -1,11 +1,12 @@
/**
* Re-exports all plugin proxy handlers
*/
import labelsHandlers from './grafana-labels-app';
import onCallHandlers from './grafana-oncall';
/**
* Array of all plugin handlers that are required across Alerting tests
*/
- const allPluginProxyHandlers = [...onCallHandlers];
+ const allPluginProxyHandlers = [...onCallHandlers, ...labelsHandlers];
export default allPluginProxyHandlers;

View File

@@ -0,0 +1,79 @@
import { HttpResponse, http } from 'msw';
import { LabelItem, LabelKeyAndValues } from 'app/features/alerting/unified/api/labelsApi';
import { SupportedPlugin } from 'app/features/alerting/unified/types/pluginBridges';
const BASE_URL = `/api/plugins/${SupportedPlugin.Labels}/resources`;
// Default mock data for ops labels
export const defaultLabelKeys: LabelItem[] = [
{ id: '1', name: 'sentMail', prescribed: false },
{ id: '2', name: 'stage', prescribed: false },
{ id: '3', name: 'team', prescribed: false },
];
export const defaultLabelValues: Record<string, LabelItem[]> = {
sentMail: [
{ id: '1', name: 'true', prescribed: false },
{ id: '2', name: 'false', prescribed: false },
],
stage: [
{ id: '1', name: 'production', prescribed: false },
{ id: '2', name: 'staging', prescribed: false },
{ id: '3', name: 'development', prescribed: false },
],
team: [
{ id: '1', name: 'frontend', prescribed: false },
{ id: '2', name: 'backend', prescribed: false },
{ id: '3', name: 'platform', prescribed: false },
],
};
/**
* Helper to generate mock ops labels in the form format (key-value pairs).
* @param keys - Array of label key names to include (defaults to first two: sentMail, stage)
* @param labelValues - Optional custom label values map
* @returns Array of { key, value } objects for use in form tests
*/
export function getMockOpsLabels(
keys: string[] = [defaultLabelKeys[0].name, defaultLabelKeys[1].name],
labelValues: Record<string, LabelItem[]> = defaultLabelValues
): Array<{ key: string; value: string }> {
return keys.map((key) => ({
key,
value: labelValues[key]?.[0]?.name ?? '',
}));
}
/**
* Handler for GET /api/plugins/grafana-labels-app/resources/v1/labels/keys
* Returns all available label keys
*/
export const getLabelsKeysHandler = (labelKeys: LabelItem[] = defaultLabelKeys) =>
http.get(`${BASE_URL}/v1/labels/keys`, () => {
return HttpResponse.json(labelKeys);
});
/**
* Handler for GET /api/plugins/grafana-labels-app/resources/v1/labels/name/:key
* Returns values for a specific label key.
* @param labelValues - Custom label values map (defaults to defaultLabelValues)
* @param onKeyRequested - Optional callback to spy on which keys are requested (useful for testing)
*/
export const getLabelValuesHandler = (
labelValues: Record<string, LabelItem[]> = defaultLabelValues,
onKeyRequested?: (key: string) => void
) =>
http.get<{ key: string }>(`${BASE_URL}/v1/labels/name/:key`, ({ params }) => {
const key = params.key;
onKeyRequested?.(key);
const values = labelValues[key] || [];
const response: LabelKeyAndValues = {
labelKey: { id: '1', name: key, prescribed: false },
values,
};
return HttpResponse.json(response);
});
const handlers = [getLabelsKeysHandler(), getLabelValuesHandler()];
export default handlers;

View File

@@ -82,7 +82,7 @@ export default function RulesFilter({ viewMode, onViewModeChange }: RulesFilterP
const styles = useStyles2(getStyles);
const [isPopupOpen, setIsPopupOpen] = useState(false);
- const { searchQuery, updateFilters, setSearchQuery } = useRulesFilter();
+ const { filterState, searchQuery, updateFilters, setSearchQuery } = useRulesFilter();
const popupRef = useRef<HTMLDivElement>(null);
const { pluginsFilterEnabled } = usePluginsFilterStatus();
@@ -104,7 +104,7 @@ export default function RulesFilter({ viewMode, onViewModeChange }: RulesFilterP
};
const handleAdvancedFilters: SubmitHandler<AdvancedFilters> = (values) => {
- updateFilters(formAdvancedFiltersToRuleFilter(values));
+ updateFilters(formAdvancedFiltersToRuleFilter(values, filterState.freeFormWords));
trackFilterButtonApplyClick(values, pluginsFilterEnabled);
setIsPopupOpen(false);
};

View File

@@ -5,9 +5,12 @@ import { RulesFilter } from '../../search/rulesSearchParser';
import { AdvancedFilters } from './types';
- export function formAdvancedFiltersToRuleFilter(values: AdvancedFilters): RulesFilter {
+ export function formAdvancedFiltersToRuleFilter(
+ values: AdvancedFilters,
+ existingFreeFormWords: string[] = []
+ ): RulesFilter {
return {
- freeFormWords: [],
+ freeFormWords: existingFreeFormWords,
...values,
namespace: values.namespace || undefined,
groupName: values.groupName || undefined,
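
To illustrate the change, a reduced, self-contained version of the function (the AdvancedFilters shape is trimmed to what the example needs): because existingFreeFormWords is seeded before the spread, free-text search terms from the list view survive applying the advanced-filters form.

type AdvancedFiltersLite = { namespace: string; groupName: string };

function toRuleFilterLite(values: AdvancedFiltersLite, existingFreeFormWords: string[] = []) {
  return {
    freeFormWords: existingFreeFormWords, // seeded first...
    ...values, // ...so a freeFormWords key on values (if one existed) would still win
    namespace: values.namespace || undefined,
    groupName: values.groupName || undefined,
  };
}

const next = toRuleFilterLite({ namespace: 'alerts', groupName: '' }, ['latency']);
// next.freeFormWords -> ['latency']: the free-form words are preserved across apply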

View File

@@ -134,6 +134,29 @@ export const pluginMeta = {
module: 'public/plugins/grafana-asserts-app/module.js',
baseUrl: 'public/plugins/grafana-asserts-app',
} satisfies PluginMeta,
[SupportedPlugin.Labels]: {
id: SupportedPlugin.Labels,
name: 'Labels',
type: PluginType.app,
enabled: true,
info: {
author: {
name: 'Grafana Labs',
url: '',
},
description: 'Labels management for alerting',
links: [],
logos: {
small: 'public/plugins/grafana-labels-app/img/logo.svg',
large: 'public/plugins/grafana-labels-app/img/logo.svg',
},
screenshots: [],
version: 'local-dev',
updated: '2024-04-09',
},
module: 'public/plugins/grafana-labels-app/module.js',
baseUrl: 'public/plugins/grafana-labels-app',
} satisfies PluginMeta,
};
export const plugins: PluginMeta[] = [
@@ -141,6 +164,7 @@ export const plugins: PluginMeta[] = [
pluginMeta[SupportedPlugin.Incident],
pluginMeta[SupportedPlugin.OnCall],
pluginMeta['grafana-asserts-app'],
pluginMeta[SupportedPlugin.Labels],
];
export function pluginMetaToPluginConfig(pluginMeta: PluginMeta): AppPluginConfig {

View File

@@ -6,8 +6,9 @@ import AutoSizer from 'react-virtualized-auto-sizer';
import { GrafanaTheme2 } from '@grafana/data';
import { Trans } from '@grafana/i18n';
import { config, reportInteraction } from '@grafana/runtime';
- import { LinkButton, FilterInput, useStyles2, Text, Stack } from '@grafana/ui';
+ import { LinkButton, FilterInput, useStyles2, Text, Stack, Box, Divider } from '@grafana/ui';
import { useGetFolderQueryFacade, useUpdateFolder } from 'app/api/clients/folder/v1beta1/hooks';
import { TeamOwnerReference } from 'app/core/components/OwnerReferences/OwnerReference';
import { Page } from 'app/core/components/Page/Page';
import { getConfig } from 'app/core/config';
import { useDispatch } from 'app/types/store';
@@ -146,6 +147,19 @@ const BrowseDashboardsPage = memo(({ queryParams }: { queryParams: Record<string
);
};
const ownerReferences = folderDTO && 'ownerReferences' in folderDTO && (
<Box>
{folderDTO.ownerReferences
?.filter((ref) => ref.kind === 'Team')
.map((ref) => (
<Stack key={ref.uid} direction="row">
<Text>Owned by team:</Text>
<TeamOwnerReference ownerReference={ref} />
</Stack>
))}
</Box>
);
return (
<Page
navId="dashboards/browse"
@@ -153,7 +167,8 @@ const BrowseDashboardsPage = memo(({ queryParams }: { queryParams: Record<string
onEditTitle={showEditTitle ? onEditTitle : undefined}
renderTitle={renderTitle}
actions={
- <>
+ <Stack alignItems="center">
+ {ownerReferences}
{config.featureToggles.restoreDashboards && hasAdminRights && (
<LinkButton
variant="secondary"
@@ -173,7 +188,7 @@ const BrowseDashboardsPage = memo(({ queryParams }: { queryParams: Record<string
isReadOnlyRepo={isReadOnlyRepo}
/>
)}
- </>
+ </Stack>
}
>
<Page.Contents className={styles.pageContents}>

View File

@@ -29,7 +29,7 @@ export async function listFolders(
});
}
- return folders.map((item) => ({
+ const result = folders.map((item) => ({
kind: 'folder',
uid: item.uid,
title: item.title,
@@ -40,6 +40,20 @@ export async function listFolders(
// URLs from the backend come with subUrlPrefix already included, so match that behaviour here
url: isSharedWithMe(item.uid) ? undefined : getFolderURL(item.uid),
}));
if (!parentUID) {
// result.unshift({
// kind: 'folder',
// uid: 'teamfolders',
// title: 'Team folders',
// parentTitle,
// parentUID,
// managedBy: undefined,
// url: undefined,
// });
}
return result;
}
export async function listDashboards(parentUID?: string, page = 1, pageSize = PAGE_SIZE): Promise<DashboardViewItem[]> {

View File

@@ -40,7 +40,7 @@ export default function CheckboxCell({
}
}
- if (isSharedWithMe(item.uid)) {
+ if (isSharedWithMe(item.uid) || item.uid === 'teamfolders') {
return <CheckboxSpacer />;
}

View File

@@ -8,7 +8,8 @@ import InfiniteLoader from 'react-window-infinite-loader';
import { GrafanaTheme2, isTruthy } from '@grafana/data';
import { selectors } from '@grafana/e2e-selectors';
import { Trans, t } from '@grafana/i18n';
- import { useStyles2 } from '@grafana/ui';
+ import { Avatar, useStyles2 } from '@grafana/ui';
+ import { TeamOwnerReference } from 'app/core/components/OwnerReferences/OwnerReference';
import { DashboardViewItem } from 'app/features/search/types';
import {
@@ -102,8 +103,27 @@ export function DashboardsTree({
Header: t('browse-dashboards.dashboards-tree.tags-column', 'Tags'),
Cell: (props: DashboardsTreeCellProps) => <TagsCell {...props} onTagClick={onTagClick} />,
};
const ownerReferencesColumn: DashboardsTreeColumn = {
id: 'ownerReferences',
width: 2,
Header: 'Owner',
Cell: ({ row: { original: data } }) => {
const ownerReferences = data.item.ownerReferences;
if (!ownerReferences) {
return null;
}
return (
<div>
{ownerReferences.map((ownerReference) => {
return <TeamOwnerReference ownerReference={ownerReference} key={ownerReference.uid} />;
})}
</div>
);
},
};
const canSelect = canSelectItems(permissions);
- const columns = [canSelect && checkboxColumn, nameColumn, tagsColumns].filter(isTruthy);
+ const columns = [canSelect && checkboxColumn, nameColumn, ownerReferencesColumn, tagsColumns].filter(isTruthy);
return columns;
}, [onFolderClick, onTagClick, permissions]);

View File

@@ -6,6 +6,7 @@ import { locationService, reportInteraction } from '@grafana/runtime';
import { Button, Drawer, Dropdown, Icon, Menu, MenuItem, Text } from '@grafana/ui';
import { appEvents } from 'app/core/app_events';
import { Permissions } from 'app/core/components/AccessControl/Permissions';
import { ManageOwnerReferences } from 'app/core/components/OwnerReferences/ManageOwnerReferences';
import { RepoType } from 'app/features/provisioning/Wizard/types';
import { BulkMoveProvisionedResource } from 'app/features/provisioning/components/BulkActions/BulkMoveProvisionedResource';
import { DeleteProvisionedFolderForm } from 'app/features/provisioning/components/Folders/DeleteProvisionedFolderForm';
@@ -30,6 +31,7 @@ interface Props {
export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props) {
const [isOpen, setIsOpen] = useState(false);
const [showPermissionsDrawer, setShowPermissionsDrawer] = useState(false);
const [showManageOwnersDrawer, setShowManageOwnersDrawer] = useState(false);
const [showDeleteProvisionedFolderDrawer, setShowDeleteProvisionedFolderDrawer] = useState(false);
const [showMoveProvisionedFolderDrawer, setShowMoveProvisionedFolderDrawer] = useState(false);
const [moveFolder] = useMoveFolderMutationFacade();
@@ -126,14 +128,18 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props)
};
const managePermissionsLabel = t('browse-dashboards.folder-actions-button.manage-permissions', 'Manage permissions');
const manageOwnersLabel = t('browse-dashboards.folder-actions-button.manage-folder-owners', 'Manage folder owners');
const moveLabel = t('browse-dashboards.folder-actions-button.move', 'Move this folder');
const deleteLabel = t('browse-dashboards.folder-actions-button.delete', 'Delete this folder');
const showManageOwners = canViewPermissions && !isProvisionedFolder;
const menu = (
<Menu>
{canViewPermissions && !isProvisionedFolder && (
<MenuItem onClick={() => setShowPermissionsDrawer(true)} label={managePermissionsLabel} />
)}
{showManageOwners && <MenuItem onClick={() => setShowManageOwnersDrawer(true)} label={manageOwnersLabel} />}
{canMoveFolder && !isReadOnlyRepo && (
<MenuItem
onClick={isProvisionedFolder ? handleShowMoveProvisionedFolderDrawer : showMoveModal}
@@ -180,6 +186,16 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props)
<Permissions resource="folders" resourceId={folder.uid} canSetPermissions={canSetPermissions} />
</Drawer>
)}
{showManageOwnersDrawer && (
<Drawer
title={t('browse-dashboards.action.manage-permissions-button', 'Manage owners')}
subtitle={folder.title}
onClose={() => setShowManageOwnersDrawer(false)}
size="md"
>
<ManageOwnerReferences resource="Folder" resourceId={folder.uid} />
</Drawer>
)}
{showDeleteProvisionedFolderDrawer && (
<Drawer
title={

View File

@@ -1,3 +1,4 @@
import { getBackendSrv } from '@grafana/runtime';
import { GENERAL_FOLDER_UID } from 'app/features/search/constants';
import { DashboardViewItem, DashboardViewItemKind } from 'app/features/search/types';
import { createAsyncThunk } from 'app/types/store';
@@ -53,6 +54,10 @@ export const refreshParents = createAsyncThunk(
}
);
const hackGetOwnerRefs = async () => {
return getBackendSrv().get('/apis/folder.grafana.app/v1beta1/namespaces/default/folders');
};
export const refetchChildren = createAsyncThunk(
'browseDashboards/refetchChildren',
async ({ parentUID, pageSize }: RefetchChildrenArgs): Promise<RefetchChildrenResult> => {
@@ -66,6 +71,7 @@ export const refetchChildren = createAsyncThunk(
let fetchKind: DashboardViewItemKind | undefined = 'folder';
let children = await listFolders(uid, undefined, page, pageSize);
let lastPageOfKind = children.length < pageSize;
// If we've loaded all folders, load the first page of dashboards.
@@ -136,6 +142,16 @@ export const fetchNextChildrenPage = createAsyncThunk(
? await listFolders(uid, undefined, page, pageSize)
: await listDashboards(uid, page, pageSize);
const foldersWithOwnerRefs = await hackGetOwnerRefs();
children.forEach((child) => {
const ownerRefs = foldersWithOwnerRefs.items.find((folder) => folder.metadata.name === child.uid)?.metadata
.ownerReferences;
if (ownerRefs) {
child.ownerReferences = ownerRefs;
}
});
let lastPageOfKind = children.length < pageSize;
// If we've loaded all folders, load the first page of dashboards.
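
The hackGetOwnerRefs join above does a linear items.find per child on every page fetch. A sketch of the same join with a Map index (an illustrative refinement, not what the wip commit ships; types are reduced stand-ins for the k8s-style folder list):

type OwnerReference = { kind: string; name: string; uid: string };
type FolderItem = { metadata: { name: string; ownerReferences?: OwnerReference[] } };
type Child = { uid: string; ownerReferences?: OwnerReference[] };

function annotateOwnerRefs(children: Child[], folders: FolderItem[]): void {
  // Index once by folder name (the uid the tree items use), then annotate in O(1).
  const byName = new Map(folders.map((f): [string, FolderItem] => [f.metadata.name, f]));
  for (const child of children) {
    const refs = byName.get(child.uid)?.metadata.ownerReferences;
    if (refs) {
      child.ownerReferences = refs;
    }
  }
}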

View File

@@ -183,7 +183,7 @@ export function createFlatTree(
const items = [thisItem, ...mappedChildren];
- if (isSharedWithMe(thisItem.item.uid)) {
+ if (isSharedWithMe(thisItem.item.uid) || thisItem.item.uid === 'teamfolders') {
items.push({
item: {
kind: 'ui',

View File

@@ -31,7 +31,6 @@ import { UNCONFIGURED_PANEL_PLUGIN_ID } from '../scene/UnconfiguredPanel';
import { DashboardGridItem } from '../scene/layout-default/DashboardGridItem';
import { DashboardLayoutItem, isDashboardLayoutItem } from '../scene/types/DashboardLayoutItem';
import { vizPanelToPanel } from '../serialization/transformSceneToSaveModel';
import { PanelModelCompatibilityWrapper } from '../utils/PanelModelCompatibilityWrapper';
import {
activateSceneObjectAndParentTree,
getDashboardSceneFor,
@@ -121,8 +120,7 @@ export class PanelEditor extends SceneObjectBase<PanelEditorState> {
dataObject.subscribeToState(async () => {
const { data } = dataObject.state;
if (hasData(data) && panel.state.pluginId === UNCONFIGURED_PANEL_PLUGIN_ID) {
- const panelModel = new PanelModelCompatibilityWrapper(panel);
- const suggestions = await getAllSuggestions(data, panelModel);
+ const suggestions = await getAllSuggestions(data);
if (suggestions.length > 0) {
const defaultFirstSuggestion = suggestions[0];

View File

@@ -25,7 +25,7 @@ const MIN_COLUMN_SIZE = 260;
export function VisualizationSuggestions({ onChange, data, panel }: Props) {
const styles = useStyles2(getStyles);
- const { value: suggestions } = useAsync(() => getAllSuggestions(data, panel), [data, panel]);
+ const { value: suggestions } = useAsync(async () => await getAllSuggestions(data), [data]);
const [suggestionHash, setSuggestionHash] = useState<string | null>(null);
const [firstCardRef, { width }] = useMeasure<HTMLDivElement>();
const [firstCardHash, setFirstCardHash] = useState<string | null>(null);

View File

@@ -0,0 +1,19 @@
export const panelsToCheckFirst = [
'timeseries',
'barchart',
'gauge',
'stat',
'piechart',
'bargauge',
'table',
'state-timeline',
'status-history',
'logs',
'candlestick',
'flamegraph',
'traces',
'nodeGraph',
'heatmap',
'histogram',
'geomap',
];

View File

@@ -2,11 +2,13 @@ import {
DataFrame,
FieldType,
getDefaultTimeRange,
getPanelDataSummary,
LoadingState,
PanelData,
PanelPluginMeta,
PanelPluginVisualizationSuggestion,
PluginType,
toDataFrame,
VisualizationSuggestionScore,
} from '@grafana/data';
import {
BarGaugeDisplayMode,
@@ -18,26 +20,69 @@ import {
} from '@grafana/schema';
import { config } from 'app/core/config';
- import { getAllSuggestions, panelsToCheckFirst } from './getAllSuggestions';
+ import { panelsToCheckFirst } from './consts';
+ import { getAllSuggestions, sortSuggestions } from './getAllSuggestions';
config.featureToggles.externalVizSuggestions = true;
let idx = 0;
for (const pluginId of panelsToCheckFirst) {
if (pluginId === 'geomap') {
continue;
}
config.panels[pluginId] = {
- module: `core:plugin/${pluginId}`,
id: pluginId,
- } as PanelPluginMeta;
+ module: `core:plugin/${pluginId}`,
+ sort: idx++,
+ name: pluginId,
+ type: PluginType.panel,
+ baseUrl: 'public/app/plugins/panel',
+ suggestions: true,
+ info: {
+ version: '1.0.0',
+ updated: '2025-01-01',
+ links: [],
+ screenshots: [],
+ author: {
+ name: 'Grafana Labs',
+ },
+ description: pluginId,
+ logos: { small: 'small/logo', large: 'large/logo' },
+ },
+ };
}
- const SCALAR_PLUGINS = ['gauge', 'stat', 'bargauge', 'piechart', 'radialbar'];
- config.panels['text'] = {
+ config.panels.text = {
id: 'text',
module: 'core:plugin/text',
sort: idx++,
name: 'Text',
type: PluginType.panel,
baseUrl: 'public/app/plugins/panel',
skipDataQuery: true,
suggestions: false,
info: {
- description: 'pretty decent plugin',
version: '1.0.0',
updated: '2025-01-01',
links: [],
screenshots: [],
author: {
name: 'Grafana Labs',
},
description: 'Text panel',
logos: { small: 'small/logo', large: 'large/logo' },
},
- } as PanelPluginMeta;
+ };
jest.mock('../state/util', () => {
const originalModule = jest.requireActual('../state/util');
return {
...originalModule,
getAllPanelPluginMeta: jest.fn().mockImplementation(() => [...Object.values(config.panels)]),
};
});
const SCALAR_PLUGINS = ['gauge', 'stat', 'bargauge', 'piechart', 'radialbar'];
class ScenarioContext {
data: DataFrame[] = [];
@@ -289,10 +334,8 @@ scenario('Single frame with string and number field', (ctx) => {
pluginId: 'stat',
options: expect.objectContaining({ colorMode: BigValueColorMode.Background }),
}),
expect.objectContaining({
pluginId: 'bargauge',
options: expect.objectContaining({ displayMode: BarGaugeDisplayMode.Basic }),
}),
expect.objectContaining({
pluginId: 'bargauge',
@@ -447,6 +490,70 @@ scenario('Given a preferredVisualisationType', (ctx) => {
});
});
describe('sortSuggestions', () => {
it('should sort suggestions correctly by score', () => {
const suggestions = [
{ pluginId: 'timeseries', name: 'Time series', hash: 'b', score: VisualizationSuggestionScore.OK },
{ pluginId: 'table', name: 'Table', hash: 'a', score: VisualizationSuggestionScore.OK },
{ pluginId: 'stat', name: 'Stat', hash: 'c', score: VisualizationSuggestionScore.Good },
] satisfies PanelPluginVisualizationSuggestion[];
const dataSummary = getPanelDataSummary([
toDataFrame({
fields: [
{ name: 'Time', type: FieldType.time, values: [1, 2, 3, 4, 5] },
{ name: 'ServerA', type: FieldType.number, values: [1, 10, 50, 2, 5] },
{ name: 'ServerB', type: FieldType.number, values: [1, 10, 50, 2, 5] },
],
}),
]);
sortSuggestions(suggestions, dataSummary);
expect(suggestions[0].pluginId).toBe('stat');
expect(suggestions[1].pluginId).toBe('timeseries');
expect(suggestions[2].pluginId).toBe('table');
});
it('should sort suggestions based on core module', () => {
const suggestions = [
{
pluginId: 'fake-external-panel',
name: 'Time series',
hash: 'b',
score: VisualizationSuggestionScore.Good,
},
{
pluginId: 'fake-external-panel',
name: 'Time series',
hash: 'd',
score: VisualizationSuggestionScore.Best,
},
{ pluginId: 'timeseries', name: 'Table', hash: 'a', score: VisualizationSuggestionScore.OK },
{ pluginId: 'stat', name: 'Stat', hash: 'c', score: VisualizationSuggestionScore.Good },
] satisfies PanelPluginVisualizationSuggestion[];
const dataSummary = getPanelDataSummary([
toDataFrame({
fields: [
{ name: 'Time', type: FieldType.time, values: [1, 2, 3, 4, 5] },
{ name: 'ServerA', type: FieldType.number, values: [1, 10, 50, 2, 5] },
{ name: 'ServerB', type: FieldType.number, values: [1, 10, 50, 2, 5] },
],
}),
]);
sortSuggestions(suggestions, dataSummary);
expect(suggestions[0].pluginId).toBe('stat');
expect(suggestions[1].pluginId).toBe('timeseries');
expect(suggestions[2].pluginId).toBe('fake-external-panel');
expect(suggestions[2].hash).toBe('d');
expect(suggestions[3].pluginId).toBe('fake-external-panel');
expect(suggestions[3].hash).toBe('b');
});
});
function repeatFrame(count: number, frame: DataFrame): DataFrame[] {
const frames: DataFrame[] = [];
for (let i = 0; i < count; i++) {

View File

@@ -1,33 +1,52 @@
import {
+ getPanelDataSummary,
PanelData,
+ PanelDataSummary,
PanelPlugin,
PanelPluginVisualizationSuggestion,
- VisualizationSuggestionsBuilder,
- PanelModel,
- VisualizationSuggestionScore,
PreferredVisualisationType,
+ VisualizationSuggestionScore,
} from '@grafana/data';
import { config } from '@grafana/runtime';
- import { importPanelPlugin } from 'app/features/plugins/importPanelPlugin';
+ import { importPanelPlugin, isBuiltInPlugin } from 'app/features/plugins/importPanelPlugin';
- export const panelsToCheckFirst = [
- 'timeseries',
- 'barchart',
- 'gauge',
- 'stat',
- 'piechart',
- 'bargauge',
- 'table',
- 'state-timeline',
- 'status-history',
- 'logs',
- 'candlestick',
- 'flamegraph',
- 'traces',
- 'nodeGraph',
- 'heatmap',
- 'histogram',
- 'geomap',
- ];
+ import { getAllPanelPluginMeta } from '../state/util';
+ import { panelsToCheckFirst } from './consts';
/**
* gather and cache the plugins which provide visualization suggestions so they can be invoked to build suggestions
*/
let _pluginCache: PanelPlugin[] | null = null;
async function getPanelsWithSuggestions(): Promise<PanelPlugin[]> {
if (!_pluginCache) {
_pluginCache = [];
// list of plugins to load is determined by the feature flag
const pluginIds: string[] = config.featureToggles.externalVizSuggestions
? getAllPanelPluginMeta()
.filter((panel) => panel.suggestions)
.map((m) => m.id)
: panelsToCheckFirst;
// import the plugins in parallel using Promise.allSettled
const settledPromises = await Promise.allSettled(pluginIds.map((id) => importPanelPlugin(id)));
for (let i = 0; i < settledPromises.length; i++) {
const settled = settledPromises[i];
if (settled.status === 'fulfilled') {
_pluginCache.push(settled.value);
}
// TODO: do we want to somehow log if there were errors loading some of the plugins?
}
}
if (_pluginCache.length === 0) {
throw new Error('No panel plugins with visualization suggestions found');
}
return _pluginCache;
}
/**
* some of the PreferredVisualisationTypes do not match the panel plugin ids, so we have to map them. d'oh.
@@ -44,24 +63,54 @@ const mapPreferredVisualisationTypeToPlugin = (type: string): PreferredVisualisa
return PLUGIN_ID_TO_PREFERRED_VIZ_TYPE[type];
};
- export async function getAllSuggestions(
- data?: PanelData,
- panel?: PanelModel
- ): Promise<PanelPluginVisualizationSuggestion[]> {
- const builder = new VisualizationSuggestionsBuilder(data, panel);
/**
* given a list of suggestions, sort them in place based on score and preferred visualisation type
*/
export function sortSuggestions(suggestions: PanelPluginVisualizationSuggestion[], dataSummary: PanelDataSummary) {
suggestions.sort((a, b) => {
// if one of these suggestions is from a built-in panel and the other isn't, prioritize the core panel.
const isPluginABuiltIn = isBuiltInPlugin(a.pluginId);
const isPluginBBuiltIn = isBuiltInPlugin(b.pluginId);
if (isPluginABuiltIn && !isPluginBBuiltIn) {
return -1;
}
if (isPluginBBuiltIn && !isPluginABuiltIn) {
return 1;
}
- for (const pluginId of panelsToCheckFirst) {
- const plugin = await importPanelPlugin(pluginId);
- const supplier = plugin.getSuggestionsSupplier();
// if a preferred visualisation type matches the data, prioritize it
const mappedA = mapPreferredVisualisationTypeToPlugin(a.pluginId);
if (mappedA && dataSummary.hasPreferredVisualisationType(mappedA)) {
return -1;
}
const mappedB = mapPreferredVisualisationTypeToPlugin(b.pluginId);
if (mappedB && dataSummary.hasPreferredVisualisationType(mappedB)) {
return 1;
}
- if (supplier) {
- supplier.getSuggestionsForData(builder);
// compare scores directly if there are no other factors
return (b.score ?? VisualizationSuggestionScore.OK) - (a.score ?? VisualizationSuggestionScore.OK);
});
}
/**
* given PanelData, return a sorted list of Suggestions from all plugins which support it.
* @param {PanelData} data queried and transformed data for the panel
* @returns {PanelPluginVisualizationSuggestion[]} sorted list of suggestions
*/
export async function getAllSuggestions(data?: PanelData): Promise<PanelPluginVisualizationSuggestion[]> {
const dataSummary = getPanelDataSummary(data?.series);
const list: PanelPluginVisualizationSuggestion[] = [];
const plugins = await getPanelsWithSuggestions();
for (const plugin of plugins) {
const suggestions = plugin.getSuggestions(dataSummary);
if (suggestions) {
list.push(...suggestions);
}
}
- const list = builder.getList();
- if (builder.dataSummary.fieldCount === 0) {
+ if (dataSummary.fieldCount === 0) {
for (const plugin of Object.values(config.panels)) {
if (!plugin.skipDataQuery || plugin.hideFromList) {
continue;
@@ -79,15 +128,7 @@ export async function getAllSuggestions(
}
}
- return list.sort((a, b) => {
- const mappedA = mapPreferredVisualisationTypeToPlugin(a.pluginId);
- if (mappedA && builder.dataSummary.hasPreferredVisualisationType(mappedA)) {
- return -1;
- }
- const mappedB = mapPreferredVisualisationTypeToPlugin(a.pluginId);
- if (mappedB && builder.dataSummary.hasPreferredVisualisationType(mappedB)) {
- return 1;
- }
- return (b.score ?? VisualizationSuggestionScore.OK) - (a.score ?? VisualizationSuggestionScore.OK);
- });
+ sortSuggestions(list, dataSummary);
+ return list;
}
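
The comparator in sortSuggestions applies three rules in order: core plugins beat external ones, a plugin matching the data's preferred visualisation type beats raw score, and score decides the rest. A reduced, runnable illustration of that ordering (isBuiltIn and hasPreferred are stand-ins for isBuiltInPlugin and the dataSummary check):

type SuggestionLite = { pluginId: string; score: number };

function compareSuggestions(
  a: SuggestionLite,
  b: SuggestionLite,
  isBuiltIn: (id: string) => boolean,
  hasPreferred: (id: string) => boolean
): number {
  if (isBuiltIn(a.pluginId) && !isBuiltIn(b.pluginId)) {
    return -1; // core panels always sort ahead of external ones
  }
  if (isBuiltIn(b.pluginId) && !isBuiltIn(a.pluginId)) {
    return 1;
  }
  if (hasPreferred(a.pluginId)) {
    return -1; // preferred visualisation type trumps raw score
  }
  if (hasPreferred(b.pluginId)) {
    return 1;
  }
  return b.score - a.score; // otherwise, higher score first
}

This matches the tests above: 'stat' (Good) sorts before 'timeseries' (OK), and both built-ins sort before 'fake-external-panel' even when the external suggestion scores Best.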

View File

@@ -115,4 +115,8 @@ const builtInPlugins: Record<string, System.Module | (() => Promise<System.Modul
'core:plugin/radialbar': radialBar,
};
export function isBuiltinPluginPath(path: string): path is keyof typeof builtInPlugins {
return Boolean(builtInPlugins[path]);
}
export default builtInPlugins;
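
isBuiltinPluginPath is a user-defined type guard: inside the if, TypeScript narrows path to the registry's key union, so the lookup needs no cast. A self-contained illustration of the pattern (the registry entries here are hypothetical):

const registry = {
  'core:plugin/text': async () => 'text module',
  'core:plugin/table': async () => 'table module',
};

function isKnownPath(path: string): path is keyof typeof registry {
  return path in registry;
}

async function load(path: string) {
  if (isKnownPath(path)) {
    return registry[path](); // narrowed: registry[path] is well-typed here
  }
  throw new Error(`not a built-in plugin path: ${path}`);
}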

View File

@@ -1,6 +1,7 @@
import { PanelPlugin, PanelPluginMeta } from '@grafana/data';
import config from 'app/core/config';
import builtInPlugins, { isBuiltinPluginPath } from './built_in_plugins';
import { pluginImporter } from './importer/pluginImporter';
const promiseCache: Record<string, Promise<PanelPlugin>> = {};
@@ -25,6 +26,14 @@ export function importPanelPlugin(id: string): Promise<PanelPlugin> {
return promiseCache[id];
}
export function isBuiltInPlugin(id?: string): id is keyof typeof builtInPlugins {
if (!id) {
return false;
}
const meta = getPanelPluginMeta(id);
return Boolean(meta != null && isBuiltinPluginPath(meta.module));
}
export function hasPanelPlugin(id: string): boolean {
return !!getPanelPluginMeta(id);
}

View File

@@ -2,7 +2,7 @@ import { DEFAULT_LANGUAGE } from '@grafana/i18n';
import { getResolvedLanguage } from '@grafana/i18n/internal';
import { config } from '@grafana/runtime';
- import builtInPlugins from '../built_in_plugins';
+ import builtInPlugins, { isBuiltinPluginPath } from '../built_in_plugins';
import { registerPluginInfoInCache } from '../loader/pluginInfoCache';
import { SystemJS } from '../loader/systemjs';
import { resolveModulePath } from '../loader/utils';
@@ -35,8 +35,8 @@ export async function importPluginModule({
});
}
- const builtIn = builtInPlugins[path];
- if (builtIn) {
+ if (isBuiltinPluginPath(path)) {
+ const builtIn = builtInPlugins[path];
// for handling dynamic imports
if (typeof builtIn === 'function') {
return await builtIn();

View File

@@ -1,10 +1,11 @@
import { css } from '@emotion/css';
- import { FormEvent } from 'react';
+ import { FormEvent, useMemo } from 'react';
+ import { useListTeamQuery } from '@grafana/api-clients/rtkq/iam/v0alpha1';
import { GrafanaTheme2, SelectableValue } from '@grafana/data';
import { Trans, t } from '@grafana/i18n';
import { config } from '@grafana/runtime';
- import { Button, Checkbox, Stack, RadioButtonGroup, useStyles2 } from '@grafana/ui';
+ import { Button, Checkbox, Stack, RadioButtonGroup, useStyles2, Combobox } from '@grafana/ui';
import { SortPicker } from 'app/core/components/Select/SortPicker';
import { TagFilter, TermCount } from 'app/core/components/TagFilter/TagFilter';
@@ -76,10 +77,25 @@ export const ActionRow = ({
? [SearchLayout.Folders]
: [];
const teams = useListTeamQuery({});
const teamOptions = useMemo(() => {
return teams.data?.items.map((team) => ({
label: team.spec.title,
value: team.metadata.name || '',
}));
}, [teams.data?.items]);
return (
<Stack justifyContent="space-between" alignItems="center">
<Stack gap={2} alignItems="center">
<TagFilter isClearable={false} tags={state.tag} tagOptions={getTagOptions} onChange={onTagFilterChange} />
<Combobox
prefixIcon="user-arrows"
onChange={() => {}}
placeholder="Filter by owner"
options={teamOptions || []}
isClearable={false}
/>
{config.featureToggles.panelTitleSearch && (
<Checkbox
data-testid="include-panels"
@@ -99,6 +115,13 @@ export const ActionRow = ({
/>
</div>
)}
{/* <div className={styles.checkboxWrapper}>
<Checkbox
label={t('search.actions.owned-by-me', 'My team folders')}
onChange={onStarredFilterChange}
value={state.teamFolders}
/>
</div> */}
{state.datasource && (
<Button icon="times" variant="secondary" onClick={() => onDatasourceChange(undefined)}>
<Trans i18nKey="search.actions.remove-datasource-filter">

View File

@@ -416,7 +416,7 @@ export function toDashboardResults(rsp: SearchAPIResponse, sort: string): DataFr
async function loadLocationInfo(): Promise<Record<string, LocationInfo>> {
// TODO: use proper pagination for search.
- const uri = `${searchURI}?type=folders&limit=100000`;
+ const uri = `${searchURI}?type=folder&limit=100000`;
const rsp = getBackendSrv()
.get<SearchAPIResponse>(uri)
.then((rsp) => {

View File

@@ -60,8 +60,11 @@ export function getIconForKind(kind: string, isOpen?: boolean): IconName {
}
export function getIconForItem(item: DashboardViewItemWithUIItems, isOpen?: boolean): IconName {
- if (item && isSharedWithMe(item.uid)) {
+ if (item.uid === 'teamfolders') {
+ return 'users-alt';
+ }
+ if (item && isSharedWithMe(item.uid)) {
return 'share-alt';
} else {
return getIconForKind(item.kind, isOpen);
}

View File

@@ -1,5 +1,6 @@
import { Action } from 'redux';
import { OwnerReference } from '@grafana/api-clients/rtkq/folder/v1beta1';
import { WithAccessControlMetadata } from '@grafana/data';
import { ManagerKind } from '../apiserver/types';
@@ -83,6 +84,7 @@ export interface DashboardViewItem {
sortMeta?: number | string; // value sorted by
sortMetaName?: string; // name of the value being sorted e.g. 'Views'
managedBy?: ManagerKind;
ownerReferences?: OwnerReference[];
}
export interface SearchAction extends Action {

View File

@@ -2,7 +2,7 @@ import { UserEvent } from '@testing-library/user-event';
import { Route, Routes } from 'react-router-dom-v5-compat';
import { render, screen, waitFor } from 'test/test-utils';
- import { setBackendSrv } from '@grafana/runtime';
+ import { config, setBackendSrv } from '@grafana/runtime';
import { setupMockServer } from '@grafana/test-utils/server';
import { MOCK_TEAMS } from '@grafana/test-utils/unstable';
import { backendSrv } from 'app/core/services/backend_srv';
@@ -27,9 +27,15 @@ const setup = async () => {
return view;
};
- const attemptCreateTeam = async (user: UserEvent, teamName?: string, teamEmail?: string) => {
+ const attemptCreateTeam = async (
+ user: UserEvent,
+ teamName?: string,
+ teamEmail?: string,
+ createTeamFolder?: boolean
+ ) => {
teamName && (await user.type(screen.getByRole('textbox', { name: /name/i }), teamName));
teamEmail && (await user.type(screen.getByLabelText(/email/i), teamEmail));
createTeamFolder && (await user.click(screen.getByLabelText(/auto-create a team folder/i)));
await user.click(screen.getByRole('button', { name: /create/i }));
};
@@ -72,4 +78,22 @@ describe('Create team', () => {
expect(screen.queryByText(/edit team page/i)).not.toBeInTheDocument();
});
describe('team folders enabled', () => {
const originalFeatureToggles = config.featureToggles;
beforeEach(() => {
config.featureToggles = { ...originalFeatureToggles, teamFolders: true };
});
afterEach(() => {
config.featureToggles = originalFeatureToggles;
});
it('renders team folder checkbox', async () => {
const { user } = await setup();
await attemptCreateTeam(user, MOCK_TEAMS[0].spec.title, undefined, true);
expect(screen.queryByText(/edit team page/i)).not.toBeInTheDocument();
});
});
});

View File

@@ -3,8 +3,8 @@ import { useForm } from 'react-hook-form';
import { NavModelItem } from '@grafana/data';
import { Trans, t } from '@grafana/i18n';
- import { locationService } from '@grafana/runtime';
- import { Button, Field, Input, FieldSet, Stack } from '@grafana/ui';
+ import { config, locationService } from '@grafana/runtime';
+ import { Button, Field, Input, FieldSet, Stack, Checkbox, Alert } from '@grafana/ui';
import { extractErrorMessage } from 'app/api/utils';
import { Page } from 'app/core/components/Page/Page';
import { TeamRolePicker } from 'app/core/components/RolePicker/TeamRolePicker';
@@ -16,34 +16,42 @@ import { TeamDTO } from 'app/types/teams';
import { useCreateTeam } from './hooks';
- const pageNav: NavModelItem = {
- icon: 'users-alt',
- id: 'team-new',
- text: 'New team',
- subTitle: 'Create a new team. Teams let you grant permissions to a group of users.',
- };
+ type NewTeamForm = TeamDTO & { createTeamFolder?: boolean };
- const CreateTeam = (): JSX.Element => {
+ export const CreateTeam = (): JSX.Element => {
const pageNav: NavModelItem = {
icon: 'users-alt',
id: 'team-new',
text: t('teams.create-team.page-title', 'New team'),
subTitle: t(
'teams.create-team.page-subtitle',
'Create a new team. Teams let you grant permissions to a group of users.'
),
};
const teamFoldersEnabled = config.featureToggles.teamFolders;
const showRolesPicker = contextSrv.licensedAccessControlEnabled();
const currentOrgId = contextSrv.user.orgId;
const notifyApp = useAppNotification();
- const [createTeamTrigger] = useCreateTeam();
+ const [createTeamTrigger, createResponse] = useCreateTeam();
const [pendingRoles, setPendingRoles] = useState<Role[]>([]);
const [{ roleOptions }] = useRoleOptions(currentOrgId);
const {
handleSubmit,
register,
formState: { errors },
- } = useForm<TeamDTO>();
+ } = useForm<NewTeamForm>();
- const createTeam = async (formModel: TeamDTO) => {
+ const createTeam = async (formModel: NewTeamForm) => {
try {
const { data, error } = await createTeamTrigger(
{
email: formModel.email || '',
name: formModel.name,
},
- pendingRoles
+ pendingRoles,
+ formModel.createTeamFolder
);
const errorMessage = error ? extractErrorMessage(error) : undefined;
@@ -73,11 +81,11 @@ const CreateTeam = (): JSX.Element => {
label={t('teams.create-team.label-name', 'Name')}
required
invalid={!!errors.name}
error="Team name is required"
error={t('teams.create-team.error-name-required', 'Team name is required')}
>
<Input {...register('name', { required: true })} id="team-name" />
</Field>
- {contextSrv.licensedAccessControlEnabled() && (
+ {showRolesPicker && (
<Field noMargin label={t('teams.create-team.label-role', 'Role')}>
<TeamRolePicker
teamId={0}
@@ -106,8 +114,37 @@ const CreateTeam = (): JSX.Element => {
placeholder="email@test.com"
/>
</Field>
{teamFoldersEnabled && (
<Field
noMargin
label={t('teams.create-team.team-folder', 'Team folder')}
description={t(
'teams.create-team.description-team-folder',
'This creates a folder associated with the team, where users can add resources like dashboards and schedules with the right permissions.'
)}
>
<Checkbox
{...register('createTeamFolder')}
id="team-folder"
label={t(
'teams.create-team.team-folder-label-autocreate-a-team-folder',
'Auto-create a team folder'
)}
/>
</Field>
)}
</Stack>
</FieldSet>
{Boolean(createResponse.error) && (
<Alert title={t('teams.create-team.error-title', 'Error creating team')} severity="error">
<Trans i18nKey="teams.create-team.error-message">
We were unable to create your new team. Please try again later or contact support.
</Trans>
<br />
<br />
<div>{extractErrorMessage(createResponse.error)}</div>
</Alert>
)}
<Button type="submit" variant="primary">
<Trans i18nKey="teams.create-team.create">Create</Trans>
</Button>

View File

@@ -0,0 +1,23 @@
import { useListFolderQuery } from '@grafana/api-clients/rtkq/folder/v1beta1';
import { Stack, Text, Link, Icon } from '@grafana/ui';
import { Team } from 'app/types/teams';
export const OwnedResources = ({ team }: { team: Team }) => {
const { data } = useListFolderQuery({});
const ownedFolders = data?.items.filter((folder) =>
folder.metadata.ownerReferences?.some((ref) => ref.uid === team.uid)
);
return (
<Stack gap={1} direction="column">
<Text variant="h3">Owned folders:</Text>
{ownedFolders &&
ownedFolders.map((folder) => (
<div key={folder.metadata.uid}>
<Link href={`/dashboards/f/${folder.metadata.name}`}>
<Icon name="folder" /> <Text>{folder.spec.title}</Text>
</Link>
</div>
))}
</Stack>
);
};

View File

@@ -11,6 +11,7 @@ import { contextSrv } from 'app/core/services/context_srv';
import { AccessControlAction } from 'app/types/accessControl';
import { StoreState, useSelector } from 'app/types/store';
import { OwnedResources } from './OwnedResources';
import TeamGroupSync, { TeamSyncUpgradeContent } from './TeamGroupSync';
import TeamPermissions from './TeamPermissions';
import TeamSettings from './TeamSettings';
@@ -26,9 +27,10 @@ enum PageTypes {
Members = 'members',
Settings = 'settings',
GroupSync = 'groupsync',
Resources = 'resources',
}
- const PAGES = ['members', 'settings', 'groupsync'];
+ const PAGES = ['members', 'settings', 'groupsync', 'resources'];
const pageNavSelector = createSelector(
[
@@ -59,24 +61,30 @@ const TeamPages = memo(() => {
const renderPage = () => {
const currentPage = PAGES.includes(pageName) ? pageName : PAGES[0];
- const canReadTeam = contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsRead, team!);
+ if (!team) {
+ return null;
+ }
+ const canReadTeam = contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsRead, team);
const canReadTeamPermissions = contextSrv.hasPermissionInMetadata(
AccessControlAction.ActionTeamsPermissionsRead,
- team!
+ team
);
const canWriteTeamPermissions = contextSrv.hasPermissionInMetadata(
AccessControlAction.ActionTeamsPermissionsWrite,
- team!
+ team
);
switch (currentPage) {
case PageTypes.Members:
if (canReadTeamPermissions) {
- return <TeamPermissions team={team!} />;
+ return <TeamPermissions team={team} />;
}
return null;
case PageTypes.Settings:
- return canReadTeam && <TeamSettings team={team!} />;
+ return canReadTeam && <TeamSettings team={team} />;
case PageTypes.Resources:
return canReadTeam && <OwnedResources team={team} />;
case PageTypes.GroupSync:
if (isSyncEnabled.current) {
if (canReadTeamPermissions) {

View File

@@ -1,7 +1,7 @@
import { useForm } from 'react-hook-form';
import { Trans, t } from '@grafana/i18n';
- import { Button, Field, FieldSet, Input, Stack } from '@grafana/ui';
+ import { Button, Divider, Field, FieldSet, Input, Stack } from '@grafana/ui';
import { TeamRolePicker } from 'app/core/components/RolePicker/TeamRolePicker';
import { useRoleOptions } from 'app/core/components/RolePicker/hooks';
import { SharedPreferences } from 'app/core/components/SharedPreferences/SharedPreferences';
@@ -97,6 +97,7 @@ const TeamSettings = ({ team }: Props) => {
<Trans i18nKey="teams.team-settings.save">Save team details</Trans>
</Button>
</form>
<Divider />
<SharedPreferences resourceUri={`teams/${team.id}`} disabled={!canWriteTeamSettings} preferenceType="team" />
</Stack>
);

View File

@@ -1,6 +1,7 @@
import { skipToken } from '@reduxjs/toolkit/query';
import { useEffect, useMemo } from 'react';
import { useCreateFolder } from 'app/api/clients/folder/v1beta1/hooks';
import {
useSearchTeamsQuery as useLegacySearchTeamsQuery,
useCreateTeamMutation,
@@ -127,14 +128,16 @@ export const useDeleteTeam = () => {
export const useCreateTeam = () => {
const [createTeam, response] = useCreateTeamMutation();
const [setTeamRoles] = useSetTeamRolesMutation();
const [createFolder] = useCreateFolder();
- const trigger = async (team: CreateTeamCommand, pendingRoles?: Role[]) => {
+ const trigger = async (team: CreateTeamCommand, pendingRoles?: Role[], createTeamFolder?: boolean) => {
const mutationResult = await createTeam({
createTeamCommand: team,
});
const { data } = mutationResult;
// Add any pending roles to the team
if (data && data.teamId && pendingRoles && pendingRoles.length) {
await contextSrv.fetchUserPermissions();
if (contextSrv.licensedAccessControlEnabled() && canUpdateRoles()) {
@@ -147,6 +150,14 @@ export const useCreateTeam = () => {
}
}
if (data && data.teamId && createTeamFolder) {
await createFolder({
title: team.name,
createAsTeamFolder: true,
teamUid: data.uid,
});
}
return mutationResult;
};
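
A reduced sketch of the trigger's new folder step (the mutation functions are passed in as stand-ins so the snippet is self-contained; field names follow the diff above):

type CreateTeamResult = { data?: { teamId?: number; uid?: string } };

async function createTeamWithFolder(
  name: string,
  createTeamFolder: boolean | undefined,
  createTeam: (cmd: { name: string }) => Promise<CreateTeamResult>,
  createFolder: (cmd: { title: string; createAsTeamFolder: boolean; teamUid?: string }) => Promise<unknown>
): Promise<CreateTeamResult> {
  const result = await createTeam({ name });
  // Only create the companion folder once the team exists; it takes the
  // team's name as its title and is linked back via the team uid.
  if (result.data?.teamId && createTeamFolder) {
    await createFolder({ title: name, createAsTeamFolder: true, teamUid: result.data.uid });
  }
  return result;
}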

View File

@@ -59,6 +59,13 @@ export function buildNavModel(team: Team): NavModelItem {
url: `org/teams/edit/${team.uid}/members`,
});
}
navModel.children!.push({
active: false,
icon: 'folder',
id: `team-resources-${team.uid}`,
text: 'Resources',
url: `org/teams/edit/${team.uid}/resources`,
});
const teamGroupSync: NavModelItem = {
active: false,

View File

@@ -2,7 +2,7 @@
"type": "panel",
"name": "Bar chart",
"id": "barchart",
"suggestions": true,
"info": {
"description": "Categorical charts with group support",
"author": {

View File

@@ -1,6 +1,6 @@
import { defaultsDeep } from 'lodash';
- import { FieldType, VisualizationSuggestion, VisualizationSuggestionsSupplierFn, VizOrientation } from '@grafana/data';
+ import { FieldType, VisualizationSuggestion, VisualizationSuggestionsSupplier, VizOrientation } from '@grafana/data';
import { t } from '@grafana/i18n';
import { LegendDisplayMode, StackingMode, VisibilityMode } from '@grafana/schema';
@@ -32,7 +32,7 @@ const withDefaults = (suggestion: VisualizationSuggestion<Options, FieldConfig>)
},
} satisfies VisualizationSuggestion<Options, FieldConfig>);
- export const barchartSuggestionsSupplier: VisualizationSuggestionsSupplierFn<Options, FieldConfig> = (dataSummary) => {
+ export const barchartSuggestionsSupplier: VisualizationSuggestionsSupplier<Options, FieldConfig> = (dataSummary) => {
if (dataSummary.frameCount !== 1) {
return;
}

View File

@@ -2,7 +2,7 @@
"type": "panel",
"name": "Bar gauge",
"id": "bargauge",
"suggestions": true,
"info": {
"description": "Horizontal and vertical gauges",
"author": {

View File

@@ -4,7 +4,7 @@ import {
FieldColorModeId,
FieldType,
VisualizationSuggestion,
- VisualizationSuggestionsSupplierFn,
+ VisualizationSuggestionsSupplier,
VizOrientation,
} from '@grafana/data';
import { t } from '@grafana/i18n';
@@ -31,7 +31,7 @@ const withDefaults = (suggestion: VisualizationSuggestion<Options>): Visualizati
const BAR_LIMIT = 30;
- export const barGaugeSugggestionsSupplier: VisualizationSuggestionsSupplierFn<Options> = (dataSummary) => {
+ export const barGaugeSugggestionsSupplier: VisualizationSuggestionsSupplier<Options> = (dataSummary) => {
if (!dataSummary.hasData || !dataSummary.hasFieldType(FieldType.number)) {
return;
}

View File

@@ -2,7 +2,7 @@
"type": "panel",
"name": "Candlestick",
"id": "candlestick",
"suggestions": true,
"info": {
"description": "Graphical representation of price movements of a security, derivative, or currency.",
"keywords": ["financial", "price", "currency", "k-line"],

View File

@@ -1,10 +1,10 @@
- import { FieldType, VisualizationSuggestionScore, VisualizationSuggestionsSupplierFn } from '@grafana/data';
+ import { FieldType, VisualizationSuggestionScore, VisualizationSuggestionsSupplier } from '@grafana/data';
import { config } from '@grafana/runtime';
import { prepareCandlestickFields } from './fields';
import { defaultOptions, Options } from './types';
export const candlestickSuggestionSupplier: VisualizationSuggestionsSupplierFn<Options> = (dataSummary) => {
export const candlestickSuggestionSupplier: VisualizationSuggestionsSupplier<Options> = (dataSummary) => {
if (
!dataSummary.rawFrames ||
!dataSummary.hasData ||

View File

@@ -2,7 +2,7 @@
   "type": "panel",
   "name": "Flame Graph",
   "id": "flamegraph",
+  "suggestions": true,
   "info": {
     "author": {
       "name": "Grafana Labs",

View File

@@ -2,7 +2,7 @@
   "type": "panel",
   "name": "Gauge",
   "id": "gauge",
+  "suggestions": true,
   "info": {
     "description": "Standard gauge visualization",
     "author": {

View File

@@ -1,6 +1,6 @@
 import { defaultsDeep } from 'lodash';
-import { ThresholdsMode, FieldType, VisualizationSuggestion, VisualizationSuggestionsSupplierFn } from '@grafana/data';
+import { ThresholdsMode, FieldType, VisualizationSuggestion, VisualizationSuggestionsSupplier } from '@grafana/data';
 import { t } from '@grafana/i18n';
 import { defaultNumericVizOptions } from 'app/features/panel/suggestions/utils';
@@ -33,7 +33,7 @@ const withDefaults = (suggestion: VisualizationSuggestion<Options>): Visualizati
 const GAUGE_LIMIT = 10;
-export const gaugeSuggestionsSupplier: VisualizationSuggestionsSupplierFn<Options> = (dataSummary) => {
+export const gaugeSuggestionsSupplier: VisualizationSuggestionsSupplier<Options> = (dataSummary) => {
   if (!dataSummary.hasData || !dataSummary.hasFieldType(FieldType.number)) {
     return;
   }

View File

@@ -2,7 +2,7 @@
   "type": "panel",
   "name": "Geomap",
   "id": "geomap",
+  "suggestions": true,
   "info": {
     "description": "Geomap panel",
     "author": {

View File

@@ -1,12 +1,10 @@
-import { VisualizationSuggestionScore, VisualizationSuggestionsSupplierFn } from '@grafana/data';
+import { VisualizationSuggestionScore, VisualizationSuggestionsSupplier } from '@grafana/data';
 import { GraphFieldConfig } from '@grafana/ui';
 import { getGeometryField, getDefaultLocationMatchers } from 'app/features/geo/utils/location';
 import { Options } from './panelcfg.gen';
-export const geomapSuggestionsSupplier: VisualizationSuggestionsSupplierFn<Options, GraphFieldConfig> = (
-  dataSummary
-) => {
+export const geomapSuggestionsSupplier: VisualizationSuggestionsSupplier<Options, GraphFieldConfig> = (dataSummary) => {
   if (!dataSummary.hasData || !dataSummary.rawFrames) {
     return;
   }

View File

@@ -2,7 +2,7 @@
   "type": "panel",
   "name": "Heatmap",
   "id": "heatmap",
+  "suggestions": true,
   "info": {
     "description": "Like a histogram over time",
     "author": {

View File

@@ -3,7 +3,7 @@ import {
   FieldType,
   PanelDataSummary,
   VisualizationSuggestionScore,
-  VisualizationSuggestionsSupplierFn,
+  VisualizationSuggestionsSupplier,
 } from '@grafana/data';
 import { config } from '@grafana/runtime';
 import { GraphFieldConfig } from '@grafana/schema';
@@ -43,7 +43,7 @@ function determineScore(dataSummary: PanelDataSummary): VisualizationSuggestionS
   return VisualizationSuggestionScore.OK;
 }
-export const heatmapSuggestionsSupplier: VisualizationSuggestionsSupplierFn<Options, GraphFieldConfig> = (
+export const heatmapSuggestionsSupplier: VisualizationSuggestionsSupplier<Options, GraphFieldConfig> = (
   dataSummary: PanelDataSummary
 ) => {
   if (
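The heatmap module also grades its fit via determineScore. A hedged sketch (annotation, not part of the diff) of how that score might ride along on a suggestion; the placement of the score field is an assumption:

// Hypothetical: attach the computed score to the emitted suggestion.
// determineScore falls back to VisualizationSuggestionScore.OK per the hunk above.
const suggestion = {
  name: 'Heatmap',
  pluginId: 'heatmap',
  score: determineScore(dataSummary),
};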

View File

@@ -2,7 +2,7 @@
   "type": "panel",
   "name": "Histogram",
   "id": "histogram",
+  "suggestions": true,
   "info": {
     "description": "Distribution of values presented as a bar chart.",
     "keywords": ["distribution", "bar chart", "frequency", "proportional"],

View File

@@ -2,7 +2,7 @@
   "type": "panel",
   "name": "Logs",
   "id": "logs",
+  "suggestions": true,
   "info": {
     "author": {
       "name": "Grafana Labs",

View File

@@ -2,7 +2,7 @@
   "type": "panel",
   "name": "Node Graph",
   "id": "nodeGraph",
+  "suggestions": true,
   "info": {
     "author": {
       "name": "Grafana Labs",

View File

@@ -1,4 +1,4 @@
-import { DataFrame, FieldType, VisualizationSuggestionScore, VisualizationSuggestionsSupplierFn } from '@grafana/data';
+import { DataFrame, FieldType, VisualizationSuggestionScore, VisualizationSuggestionsSupplier } from '@grafana/data';
 import { Options } from './panelcfg.gen';
@@ -44,7 +44,7 @@ function frameHasCorrectFields(frames: DataFrame[]): boolean {
   return hasNodesFrame && hasEdgesFrame;
 }
-export const nodeGraphSuggestionsSupplier: VisualizationSuggestionsSupplierFn<Options> = (dataSummary) => {
+export const nodeGraphSuggestionsSupplier: VisualizationSuggestionsSupplier<Options> = (dataSummary) => {
   if (!dataSummary.rawFrames) {
     return;
   }

View File

@@ -2,7 +2,7 @@
   "type": "panel",
   "name": "Pie chart",
   "id": "piechart",
+  "suggestions": true,
   "info": {
     "description": "The new core pie chart visualization",
     "author": {

View File

@@ -4,7 +4,7 @@ import {
   FieldType,
   VisualizationSuggestion,
   VisualizationSuggestionScore,
-  VisualizationSuggestionsSupplierFn,
+  VisualizationSuggestionsSupplier,
 } from '@grafana/data';
 import { t } from '@grafana/i18n';
 import { LegendDisplayMode } from '@grafana/schema';
@@ -29,7 +29,7 @@ const withDefaults = (suggestion: VisualizationSuggestion<Options>): Visualizati
 const SLICE_MAX = 30;
 const SLICE_MIN = 2;
-export const piechartSuggestionsSupplier: VisualizationSuggestionsSupplierFn<Options> = (dataSummary) => {
+export const piechartSuggestionsSupplier: VisualizationSuggestionsSupplier<Options> = (dataSummary) => {
   if (!dataSummary.hasFieldType(FieldType.number)) {
     return;
   }

View File

@@ -3,6 +3,7 @@
   "name": "New Gauge",
   "id": "radialbar",
   "state": "alpha",
+  "suggestions": false,
   "info": {
     "description": "Standard gauge visualization",
     "author": {

Some files were not shown because too many files have changed in this diff.