Compare commits

...

7 Commits

Author SHA1 Message Date
Rafael Paulovic
7c6c999347 chore: fix linter 2025-12-11 10:06:04 +01:00
Rafael Paulovic
8d01bccea7 chore: fix tests 2025-12-11 10:06:04 +01:00
Rafael Bortolon Paulovic
fb067c3563 feat: legacy ListIterator with batches (#115038)
* feat: legacy ListIterator with batches

* chore: address code review

* chore: remove nil check in nextBatch

* chore: move close before count check

* chore: add err field to batchingIterator for its own errors

* chore: remove unused import
2025-12-11 10:06:04 +01:00
Rafael Bortolon Paulovic
cb381f967c feat(dashboard): Org-aware cache for schema migration (#115025)
* fix: use dsIndexProvider cache on migrations

* chore: use same comment as before

* feat: org-aware TTL cache for schemaversion migration and warmup for single tenant

* chore: use LRU cache

* chore: change DefaultCacheTTL to 1 minute

* chore: address copilot reviews

* chore: use expirable cache

* chore: remove unused import
2025-12-11 10:06:04 +01:00
Rafael Bortolon Paulovic
f7c91424e5 fix: use dsIndexProvider cache on schema migrations (#115018)
* fix: use dsIndexProvider cache on migrations

* chore: use same comment as before
2025-12-11 10:06:04 +01:00
Jean-Philippe Quéméner
b2cd4e68c9 fix(unified-storage): check auth before getting value (#114995) 2025-12-11 10:06:04 +01:00
maicon
8c8ac8efbe RRC Slow Channel Patch 1 (#114811)
* chore: bump authlib/types to v0.0.0-20251119142549-be091cf2f4d4 (#114177)

* chore: bump authlib/types to v0.0.0-20251119142549-be091cf2f4d4

Signed-off-by: Maicon Costa <maiconscosta@gmail.com>

* Update Go Workspace

Signed-off-by: Maicon Costa <maiconscosta@gmail.com>

* Stop supporting deprecated namespace format in TestExtendedJWT_Authenticate

Signed-off-by: Maicon Costa <maiconscosta@gmail.com>

* Update go mod

Signed-off-by: Maicon Costa <maiconscosta@gmail.com>

---------

Signed-off-by: Maicon Costa <maiconscosta@gmail.com>

* Fix search by both tags and folders. (#114246)

* Fix search by both tags and folders.

* Move // nolint:gocyclo to the new method.

* Revert unnecessary change.

* fix: update search request for existing provisioned dashboards in modes 3+ (#114412)

Fix search for existing provisioned dashboards in modes 3+

The search query was not requesting the dashboard's "legacy ID". As a result,
the provisioning process would not find existing provisioned dashboards, making
copies of these dashboards every time there was a change in the provisioned
dashboard's definition.

* Dashboards: Prevent panic in validation (#114436)

* provisioning: acquire server lock before provisioning dashboards+folders (#114488)

* provisioning: acquire server lock before provisioning dashboards+folders

Signed-off-by: Maicon Costa <maiconscosta@gmail.com>

---------

Signed-off-by: Maicon Costa <maiconscosta@gmail.com>

* feat(unified-storage): Add adaptive backoff to event notifier polling (#114401)

* use exponential backoff in notifier

* Enhance BadgerDB configuration in REST options with memory table size and number of memtables

* Enhance BadgerDB configuration in REST options by adding value threshold for LSM vs value log storage

* Provisioning: Fix panic on watcher when channel is closed (#114439)

---------

Signed-off-by: Maicon Costa <maiconscosta@gmail.com>
Co-authored-by: Peter Štibraný <pstibrany@gmail.com>
Co-authored-by: Renato Costa <103441181+renatolabs@users.noreply.github.com>
Co-authored-by: Stephanie Hingtgen <stephanie.hingtgen@grafana.com>
Co-authored-by: Georges Chaudy <chaudyg@gmail.com>
2025-12-05 10:29:29 +01:00
69 changed files with 2871 additions and 353 deletions

View File

@@ -6,7 +6,7 @@ require (
github.com/Masterminds/semver/v3 v3.4.0
github.com/google/go-cmp v0.7.0
github.com/google/go-github/v70 v70.0.0
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4
github.com/grafana/grafana v0.0.0-00010101000000-000000000000
github.com/grafana/grafana-app-sdk v0.48.2
github.com/grafana/grafana-app-sdk/logging v0.48.1

View File

@@ -651,8 +651,8 @@ github.com/grafana/alerting v0.0.0-20251009192429-9427c24835ae h1:NLPwY3tIP0lg0g
github.com/grafana/alerting v0.0.0-20251009192429-9427c24835ae/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dataplane/examples v0.0.1 h1:K9M5glueWyLoL4//H+EtTQq16lXuHLmOhb6DjSCahzA=
github.com/grafana/dataplane/examples v0.0.1/go.mod h1:h5YwY8s407/17XF5/dS8XrUtsTVV2RnuW8+m1Mp46mg=
github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s=

View File

@@ -4,7 +4,7 @@ go 1.25.3
require (
cuelang.org/go v0.11.1
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4
github.com/grafana/grafana-app-sdk v0.48.2
github.com/grafana/grafana-app-sdk/logging v0.48.1
github.com/grafana/grafana-plugin-sdk-go v0.283.0
@@ -57,6 +57,7 @@ require (
github.com/hashicorp/go-hclog v1.6.3 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-plugin v1.7.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/yamux v0.1.2 // indirect
github.com/jaegertracing/jaeger-idl v0.5.0 // indirect
github.com/josharian/intern v1.0.0 // indirect

View File

@@ -81,8 +81,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4=
github.com/grafana/grafana-app-sdk v0.48.2 h1:CQQDhwo1fWaXQVKvxxOcK6azbuY3E2TgJHNAZlYYn7U=
@@ -112,6 +112,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA=
github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8=
github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=
github.com/jaegertracing/jaeger-idl v0.5.0 h1:zFXR5NL3Utu7MhPg8ZorxtCBjHrL3ReM1VoB65FOFGE=

View File

@@ -66,6 +66,9 @@ func ValidateDashboardSpec(obj *Dashboard, forceValidation bool) (field.ErrorLis
}
// formatErrorPath renders a validation error path as a dot-separated string.
// Paths deep enough to contain the four-segment
// "lineage.schemas[0].schema.spec" prefix have that prefix stripped, since it
// is the same for every error; shorter paths are joined as-is.
func formatErrorPath(path []string) string {
	// Segment count of "lineage.schemas[0].schema.spec".
	const prefixSegments = 4
	if len(path) > prefixSegments {
		return strings.Join(path[prefixSegments:], ".")
	}
	return strings.Join(path, ".")
}

View File

@@ -67,6 +67,9 @@ func ValidateDashboardSpec(obj *Dashboard, forceValidation bool) (field.ErrorLis
}
// formatErrorPath joins a field path with dots for display in validation
// errors, omitting the fixed "lineage.schemas[0].schema.spec" prefix when the
// path extends beyond it. Paths no longer than the prefix are returned joined
// but otherwise unchanged.
func formatErrorPath(path []string) string {
	// Number of leading segments in "lineage.schemas[0].schema.spec";
	// named to avoid a bare magic number.
	const schemaPrefixSegments = 4
	if len(path) <= schemaPrefixSegments {
		return strings.Join(path, ".")
	}
	return strings.Join(path[schemaPrefixSegments:], ".")
}

View File

@@ -11,12 +11,7 @@ import (
"github.com/grafana/grafana/apps/dashboard/pkg/migration/schemaversion"
)
func RegisterConversions(s *runtime.Scheme, dsIndexProvider schemaversion.DataSourceIndexProvider, _ schemaversion.LibraryElementIndexProvider) error {
// Wrap the provider once with 10s caching for all conversions.
// This prevents repeated DB queries across multiple conversion calls while allowing
// the cache to refresh periodically, making it suitable for long-lived singleton usage.
dsIndexProvider = schemaversion.WrapIndexProviderWithCache(dsIndexProvider)
func RegisterConversions(s *runtime.Scheme, dsIndexProvider schemaversion.DataSourceIndexProvider, leIndexProvider schemaversion.LibraryElementIndexProvider) error {
// v0 conversions
if err := s.AddConversionFunc((*dashv0.Dashboard)(nil), (*dashv1.Dashboard)(nil),
withConversionMetrics(dashv0.APIVERSION, dashv1.APIVERSION, func(a, b interface{}, scope conversion.Scope) error {

View File

@@ -0,0 +1,436 @@
package conversion
import (
"context"
"sync/atomic"
"testing"
"time"
dashv0 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v0alpha1"
dashv1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
dashv2alpha1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2alpha1"
dashv2beta1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2beta1"
"github.com/grafana/grafana/apps/dashboard/pkg/migration"
"github.com/grafana/grafana/apps/dashboard/pkg/migration/schemaversion"
common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// countingDataSourceProvider is a DataSourceIndexProvider test double that
// records how many times Index() is invoked on it.
type countingDataSourceProvider struct {
	datasources []schemaversion.DataSourceInfo
	callCount   atomic.Int64
}

// newCountingDataSourceProvider builds a counting provider backed by the
// given static datasource list.
func newCountingDataSourceProvider(datasources []schemaversion.DataSourceInfo) *countingDataSourceProvider {
	p := &countingDataSourceProvider{}
	p.datasources = datasources
	return p
}

// Index bumps the call counter and returns a fresh index built over the
// static datasource list.
func (p *countingDataSourceProvider) Index(_ context.Context) *schemaversion.DatasourceIndex {
	p.callCount.Add(1)
	return schemaversion.NewDatasourceIndex(p.datasources)
}

// getCallCount reports how many times Index has been called so far.
func (p *countingDataSourceProvider) getCallCount() int64 {
	return p.callCount.Load()
}
// countingLibraryElementProvider is a LibraryElementIndexProvider test double
// that records how many times GetLibraryElementInfo() is invoked on it.
type countingLibraryElementProvider struct {
	elements  []schemaversion.LibraryElementInfo
	callCount atomic.Int64
}

// newCountingLibraryElementProvider builds a counting provider backed by the
// given static library element list.
func newCountingLibraryElementProvider(elements []schemaversion.LibraryElementInfo) *countingLibraryElementProvider {
	p := &countingLibraryElementProvider{}
	p.elements = elements
	return p
}

// GetLibraryElementInfo bumps the call counter and returns the static
// element list.
func (p *countingLibraryElementProvider) GetLibraryElementInfo(_ context.Context) []schemaversion.LibraryElementInfo {
	p.callCount.Add(1)
	return p.elements
}
// createTestV0Dashboard creates a minimal v0 dashboard for testing.
// The dashboard has a datasource with UID only (no type) to force provider lookup,
// and includes library panels to test library element provider caching.
// Name is fixed ("test-dashboard"); namespace and title come from the caller.
func createTestV0Dashboard(namespace, title string) *dashv0.Dashboard {
	return &dashv0.Dashboard{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-dashboard",
			Namespace: namespace,
		},
		Spec: common.Unstructured{
			Object: map[string]interface{}{
				"title":         title,
				"schemaVersion": schemaversion.LATEST_VERSION,
				// Variables with a datasource reference that requires lookup.
				"templating": map[string]interface{}{
					"list": []interface{}{
						map[string]interface{}{
							"name":  "query_var",
							"type":  "query",
							"query": "label_values(up, job)",
							// Datasource with UID only - type needs to be looked up.
							"datasource": map[string]interface{}{
								"uid": "ds1",
								// "type" is intentionally omitted to trigger provider lookup.
							},
						},
					},
				},
				"panels": []interface{}{
					map[string]interface{}{
						"id":    1,
						"title": "Test Panel",
						"type":  "timeseries",
						"targets": []interface{}{
							map[string]interface{}{
								// Datasource with UID only - type needs to be looked up.
								"datasource": map[string]interface{}{
									"uid": "ds1",
								},
							},
						},
					},
					// Library panel reference - triggers library element provider lookup.
					map[string]interface{}{
						"id":    2,
						"title": "Library Panel with Horizontal Repeat",
						"type":  "library-panel-ref",
						"gridPos": map[string]interface{}{
							"h": 8,
							"w": 12,
							"x": 0,
							"y": 8,
						},
						"libraryPanel": map[string]interface{}{
							"uid":  "lib-panel-repeat-h",
							"name": "Library Panel with Horizontal Repeat",
						},
					},
					// Another library panel reference.
					map[string]interface{}{
						"id":    3,
						"title": "Library Panel without Repeat",
						"type":  "library-panel-ref",
						"gridPos": map[string]interface{}{
							"h": 3,
							"w": 6,
							"x": 0,
							"y": 16,
						},
						"libraryPanel": map[string]interface{}{
							"uid":  "lib-panel-no-repeat",
							"name": "Library Panel without Repeat",
						},
					},
				},
			},
		},
	}
}
// createTestV1Dashboard creates a minimal v1beta1 dashboard for testing.
// The dashboard has a datasource with UID only (no type) to force provider lookup,
// and includes library panels to test library element provider caching.
// Name is fixed ("test-dashboard"); namespace and title come from the caller.
func createTestV1Dashboard(namespace, title string) *dashv1.Dashboard {
	return &dashv1.Dashboard{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-dashboard",
			Namespace: namespace,
		},
		Spec: common.Unstructured{
			Object: map[string]interface{}{
				"title":         title,
				"schemaVersion": schemaversion.LATEST_VERSION,
				// Variables with a datasource reference that requires lookup.
				"templating": map[string]interface{}{
					"list": []interface{}{
						map[string]interface{}{
							"name":  "query_var",
							"type":  "query",
							"query": "label_values(up, job)",
							// Datasource with UID only - type needs to be looked up.
							"datasource": map[string]interface{}{
								"uid": "ds1",
								// "type" is intentionally omitted to trigger provider lookup.
							},
						},
					},
				},
				"panels": []interface{}{
					map[string]interface{}{
						"id":    1,
						"title": "Test Panel",
						"type":  "timeseries",
						"targets": []interface{}{
							map[string]interface{}{
								// Datasource with UID only - type needs to be looked up.
								"datasource": map[string]interface{}{
									"uid": "ds1",
								},
							},
						},
					},
					// Library panel reference - triggers library element provider lookup.
					map[string]interface{}{
						"id":    2,
						"title": "Library Panel with Vertical Repeat",
						"type":  "library-panel-ref",
						"gridPos": map[string]interface{}{
							"h": 4,
							"w": 6,
							"x": 0,
							"y": 8,
						},
						"libraryPanel": map[string]interface{}{
							"uid":  "lib-panel-repeat-v",
							"name": "Library Panel with Vertical Repeat",
						},
					},
					// Another library panel reference.
					map[string]interface{}{
						"id":    3,
						"title": "Library Panel without Repeat",
						"type":  "library-panel-ref",
						"gridPos": map[string]interface{}{
							"h": 3,
							"w": 6,
							"x": 6,
							"y": 8,
						},
						"libraryPanel": map[string]interface{}{
							"uid":  "lib-panel-no-repeat",
							"name": "Library Panel without Repeat",
						},
					},
				},
			},
		},
	}
}
// TestConversionCaching_V0_to_V2alpha1 verifies caching works when converting V0 to V2alpha1
func TestConversionCaching_V0_to_V2alpha1(t *testing.T) {
datasources := []schemaversion.DataSourceInfo{
{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
}
elements := []schemaversion.LibraryElementInfo{
{UID: "lib-panel-repeat-h", Name: "Library Panel with Horizontal Repeat", Type: "timeseries"},
{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
}
underlyingDS := newCountingDataSourceProvider(datasources)
underlyingLE := newCountingLibraryElementProvider(elements)
cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, time.Minute)
cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, time.Minute)
migration.ResetForTesting()
migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
// Convert multiple dashboards in the same namespace
numDashboards := 5
namespace := "default"
for i := 0; i < numDashboards; i++ {
source := createTestV0Dashboard(namespace, "Dashboard "+string(rune('A'+i)))
target := &dashv2alpha1.Dashboard{}
err := Convert_V0_to_V2alpha1(source, target, nil, cachedDS)
require.NoError(t, err, "conversion %d should succeed", i)
require.NotNil(t, target.Spec)
}
// With caching, the underlying datasource provider should only be called once per namespace
// The test dashboard has datasources without type that require lookup
assert.Equal(t, int64(1), underlyingDS.getCallCount(),
"datasource provider should be called only once for %d conversions in same namespace", numDashboards)
}
// TestConversionCaching_V0_to_V2beta1 verifies caching works when converting V0 to V2beta1
func TestConversionCaching_V0_to_V2beta1(t *testing.T) {
datasources := []schemaversion.DataSourceInfo{
{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
}
elements := []schemaversion.LibraryElementInfo{
{UID: "lib-panel-repeat-h", Name: "Library Panel with Horizontal Repeat", Type: "timeseries"},
{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
}
underlyingDS := newCountingDataSourceProvider(datasources)
underlyingLE := newCountingLibraryElementProvider(elements)
cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, time.Minute)
cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, time.Minute)
migration.ResetForTesting()
migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
numDashboards := 5
namespace := "default"
for i := 0; i < numDashboards; i++ {
source := createTestV0Dashboard(namespace, "Dashboard "+string(rune('A'+i)))
target := &dashv2beta1.Dashboard{}
err := Convert_V0_to_V2beta1(source, target, nil, cachedDS)
require.NoError(t, err, "conversion %d should succeed", i)
require.NotNil(t, target.Spec)
}
assert.Equal(t, int64(1), underlyingDS.getCallCount(),
"datasource provider should be called only once for %d conversions in same namespace", numDashboards)
}
// TestConversionCaching_V1beta1_to_V2alpha1 verifies caching works when converting V1beta1 to V2alpha1
func TestConversionCaching_V1beta1_to_V2alpha1(t *testing.T) {
datasources := []schemaversion.DataSourceInfo{
{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
}
elements := []schemaversion.LibraryElementInfo{
{UID: "lib-panel-repeat-v", Name: "Library Panel with Vertical Repeat", Type: "timeseries"},
{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
}
underlyingDS := newCountingDataSourceProvider(datasources)
underlyingLE := newCountingLibraryElementProvider(elements)
cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, time.Minute)
cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, time.Minute)
migration.ResetForTesting()
migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
numDashboards := 5
namespace := "default"
for i := 0; i < numDashboards; i++ {
source := createTestV1Dashboard(namespace, "Dashboard "+string(rune('A'+i)))
target := &dashv2alpha1.Dashboard{}
err := Convert_V1beta1_to_V2alpha1(source, target, nil, cachedDS)
require.NoError(t, err, "conversion %d should succeed", i)
require.NotNil(t, target.Spec)
}
assert.Equal(t, int64(1), underlyingDS.getCallCount(),
"datasource provider should be called only once for %d conversions in same namespace", numDashboards)
}
// TestConversionCaching_V1beta1_to_V2beta1 verifies caching works when converting V1beta1 to V2beta1
func TestConversionCaching_V1beta1_to_V2beta1(t *testing.T) {
datasources := []schemaversion.DataSourceInfo{
{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
}
elements := []schemaversion.LibraryElementInfo{
{UID: "lib-panel-repeat-v", Name: "Library Panel with Vertical Repeat", Type: "timeseries"},
{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
}
underlyingDS := newCountingDataSourceProvider(datasources)
underlyingLE := newCountingLibraryElementProvider(elements)
cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, time.Minute)
cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, time.Minute)
migration.ResetForTesting()
migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
numDashboards := 5
namespace := "default"
for i := 0; i < numDashboards; i++ {
source := createTestV1Dashboard(namespace, "Dashboard "+string(rune('A'+i)))
target := &dashv2beta1.Dashboard{}
err := Convert_V1beta1_to_V2beta1(source, target, nil, cachedDS)
require.NoError(t, err, "conversion %d should succeed", i)
require.NotNil(t, target.Spec)
}
assert.Equal(t, int64(1), underlyingDS.getCallCount(),
"datasource provider should be called only once for %d conversions in same namespace", numDashboards)
}
// TestConversionCaching_MultipleNamespaces verifies that different namespaces get separate cache entries
func TestConversionCaching_MultipleNamespaces(t *testing.T) {
datasources := []schemaversion.DataSourceInfo{
{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
}
elements := []schemaversion.LibraryElementInfo{
{UID: "lib-panel-repeat-h", Name: "Library Panel with Horizontal Repeat", Type: "timeseries"},
{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
}
underlyingDS := newCountingDataSourceProvider(datasources)
underlyingLE := newCountingLibraryElementProvider(elements)
cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, time.Minute)
cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, time.Minute)
migration.ResetForTesting()
migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
namespaces := []string{"default", "org-2", "org-3"}
numDashboardsPerNs := 3
for _, ns := range namespaces {
for i := 0; i < numDashboardsPerNs; i++ {
source := createTestV0Dashboard(ns, "Dashboard "+string(rune('A'+i)))
target := &dashv2alpha1.Dashboard{}
err := Convert_V0_to_V2alpha1(source, target, nil, cachedDS)
require.NoError(t, err, "conversion for namespace %s should succeed", ns)
}
}
// With caching, each namespace should result in one call to the underlying provider
expectedCalls := int64(len(namespaces))
assert.Equal(t, expectedCalls, underlyingDS.getCallCount(),
"datasource provider should be called once per namespace (%d namespaces)", len(namespaces))
}
// TestConversionCaching_CacheDisabled verifies that TTL=0 disables caching
func TestConversionCaching_CacheDisabled(t *testing.T) {
datasources := []schemaversion.DataSourceInfo{
{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
}
elements := []schemaversion.LibraryElementInfo{
{UID: "lib-panel-repeat-h", Name: "Library Panel with Horizontal Repeat", Type: "timeseries"},
{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
}
underlyingDS := newCountingDataSourceProvider(datasources)
underlyingLE := newCountingLibraryElementProvider(elements)
// TTL of 0 should disable caching - the wrapper returns the underlying provider directly
cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, 0)
cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, 0)
migration.ResetForTesting()
migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
numDashboards := 3
namespace := "default"
for i := 0; i < numDashboards; i++ {
source := createTestV0Dashboard(namespace, "Dashboard "+string(rune('A'+i)))
target := &dashv2alpha1.Dashboard{}
err := Convert_V0_to_V2alpha1(source, target, nil, cachedDS)
require.NoError(t, err, "conversion %d should succeed", i)
}
// Without caching, each conversion calls the underlying provider multiple times
// (once for each datasource lookup needed - variables and panels)
// The key check is that the count is GREATER than 1 per conversion (no caching benefit)
assert.Greater(t, underlyingDS.getCallCount(), int64(numDashboards),
"with cache disabled, conversions should call datasource provider multiple times")
}

View File

@@ -829,7 +829,7 @@ func TestDataLossDetectionOnAllInputFiles(t *testing.T) {
// Initialize the migrator with a test data source provider
dsProvider := testutil.NewDataSourceProvider(testutil.StandardTestConfig)
leProvider := testutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
// Set up conversion scheme
scheme := runtime.NewScheme()

View File

@@ -34,7 +34,7 @@ func TestConversionMatrixExist(t *testing.T) {
// Initialize the migrator with a test data source provider
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
versions := []metav1.Object{
&dashv0.Dashboard{Spec: common.Unstructured{Object: map[string]any{"title": "dashboardV0"}}},
@@ -87,7 +87,7 @@ func TestDashboardConversionToAllVersions(t *testing.T) {
// Initialize the migrator with a test data source provider
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
// Set up conversion scheme
scheme := runtime.NewScheme()
@@ -247,7 +247,7 @@ func TestMigratedDashboardsConversion(t *testing.T) {
// Initialize the migrator with a test data source provider
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
// Set up conversion scheme
scheme := runtime.NewScheme()
@@ -382,7 +382,7 @@ func TestConversionMetrics(t *testing.T) {
// Initialize migration with test providers
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
// Create a test registry for metrics
registry := prometheus.NewRegistry()
@@ -510,7 +510,7 @@ func TestConversionMetrics(t *testing.T) {
func TestConversionMetricsWrapper(t *testing.T) {
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
// Create a test registry for metrics
registry := prometheus.NewRegistry()
@@ -679,7 +679,7 @@ func TestSchemaVersionExtraction(t *testing.T) {
// Test the schema version extraction logic by creating a wrapper and checking the metrics labels
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
// Create a test registry for metrics
registry := prometheus.NewRegistry()
@@ -724,7 +724,7 @@ func TestSchemaVersionExtraction(t *testing.T) {
func TestConversionLogging(t *testing.T) {
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
// Create a test registry for metrics
registry := prometheus.NewRegistry()
@@ -816,7 +816,7 @@ func TestConversionLogging(t *testing.T) {
func TestConversionLogLevels(t *testing.T) {
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
t.Run("log levels and structured fields verification", func(t *testing.T) {
// Create test wrapper to verify logging behavior
@@ -888,7 +888,7 @@ func TestConversionLogLevels(t *testing.T) {
func TestConversionLoggingFields(t *testing.T) {
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
t.Run("verify all log fields are present", func(t *testing.T) {
// Test that the conversion wrapper includes all expected structured fields

View File

@@ -20,7 +20,7 @@ func TestV0ConversionErrorHandling(t *testing.T) {
// Initialize the migrator with a test data source provider
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
tests := []struct {
name string
@@ -132,7 +132,7 @@ func TestV0ConversionErrorPropagation(t *testing.T) {
// Initialize the migrator with a test data source provider
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
t.Run("ConvertDashboard_V0_to_V1beta1 returns error on migration failure", func(t *testing.T) {
source := &dashv0.Dashboard{
@@ -206,7 +206,7 @@ func TestV0ConversionSuccessPaths(t *testing.T) {
// Initialize the migrator with a test data source provider
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
t.Run("Convert_V0_to_V1beta1 success path returns nil", func(t *testing.T) {
source := &dashv0.Dashboard{
@@ -275,7 +275,7 @@ func TestV0ConversionSecondStepErrors(t *testing.T) {
// Initialize the migrator with a test data source provider
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
t.Run("Convert_V0_to_V2alpha1 sets status on first step error", func(t *testing.T) {
// Create a dashboard that will fail v0->v1beta1 conversion

View File

@@ -19,7 +19,7 @@ func TestV1ConversionErrorHandling(t *testing.T) {
// Initialize the migrator with a test data source provider
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
t.Run("Convert_V1beta1_to_V2alpha1 sets status on conversion error", func(t *testing.T) {
// Create a dashboard that will cause conversion to fail

View File

@@ -24,7 +24,7 @@ func TestV2beta1ToV2alpha1RoundTrip(t *testing.T) {
// Initialize the migrator with test providers
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
// Set up conversion scheme
scheme := runtime.NewScheme()
@@ -107,7 +107,7 @@ func TestV2beta1ToV2alpha1FromOutputFiles(t *testing.T) {
// Initialize the migrator with test providers
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
// Set up conversion scheme
scheme := runtime.NewScheme()
@@ -193,7 +193,7 @@ func TestV2beta1ToV2alpha1(t *testing.T) {
// Initialize the migrator with test providers
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
migration.Initialize(dsProvider, leProvider)
migration.Initialize(dsProvider, leProvider, migration.DefaultCacheTTL)
// Set up conversion scheme
scheme := runtime.NewScheme()

View File

@@ -4,13 +4,19 @@ import (
"context"
"fmt"
"sync"
"time"
"github.com/grafana/authlib/types"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/grafana/grafana/apps/dashboard/pkg/migration/schemaversion"
)
// DefaultCacheTTL is the default TTL for the datasource and library element caches.
// Callers that need a different staleness bound pass their own TTL to Initialize.
const DefaultCacheTTL = time.Minute
// Initialize provides the migrator singleton with required dependencies and builds the map of migrations.
func Initialize(dsIndexProvider schemaversion.DataSourceIndexProvider, leIndexProvider schemaversion.LibraryElementIndexProvider) {
migratorInstance.init(dsIndexProvider, leIndexProvider)
func Initialize(dsIndexProvider schemaversion.DataSourceIndexProvider, leIndexProvider schemaversion.LibraryElementIndexProvider, cacheTTL time.Duration) {
migratorInstance.init(dsIndexProvider, leIndexProvider, cacheTTL)
}
// GetDataSourceIndexProvider returns the datasource index provider instance that was initialized.
@@ -38,6 +44,34 @@ func ResetForTesting() {
initOnce = sync.Once{}
}
// PreloadCache preloads the datasource and library element caches for the given namespaces.
// It blocks until Initialize has completed so the providers below are the wrapped,
// cache-capable instances.
func PreloadCache(ctx context.Context, nsInfos []types.NamespaceInfo) {
	// Wait for initialization to complete
	<-migratorInstance.ready

	// Warm each provider that opts into preloading; datasources first, then
	// library elements (same order as before).
	providers := []any{
		migratorInstance.dsIndexProvider,
		migratorInstance.leIndexProvider,
	}
	for _, provider := range providers {
		if warmable, ok := provider.(schemaversion.PreloadableCache); ok {
			warmable.Preload(ctx, nsInfos)
		}
	}
}
// PreloadCacheInBackground starts a goroutine that preloads the caches for the given namespaces.
// A panic inside preloading must not take down the process, so the goroutine
// recovers and logs instead.
func PreloadCacheInBackground(nsInfos []types.NamespaceInfo) {
	run := func() {
		defer func() {
			if rec := recover(); rec != nil {
				logging.DefaultLogger.Error("panic during cache preloading", "error", rec)
			}
		}()
		PreloadCache(context.Background(), nsInfos)
	}
	go run()
}
// Migrate migrates the given dashboard to the target version.
// This will block until the migrator is initialized.
func Migrate(ctx context.Context, dash map[string]interface{}, targetVersion int) error {
@@ -59,11 +93,15 @@ type migrator struct {
leIndexProvider schemaversion.LibraryElementIndexProvider
}
func (m *migrator) init(dsIndexProvider schemaversion.DataSourceIndexProvider, leIndexProvider schemaversion.LibraryElementIndexProvider) {
func (m *migrator) init(dsIndexProvider schemaversion.DataSourceIndexProvider, leIndexProvider schemaversion.LibraryElementIndexProvider, cacheTTL time.Duration) {
initOnce.Do(func() {
m.dsIndexProvider = dsIndexProvider
m.leIndexProvider = leIndexProvider
m.migrations = schemaversion.GetMigrations(dsIndexProvider, leIndexProvider)
// Wrap the provider with org-aware TTL caching for all conversions.
// This prevents repeated DB queries across multiple conversion calls while allowing
// the cache to refresh periodically, making it suitable for long-lived singleton usage.
m.dsIndexProvider = schemaversion.WrapIndexProviderWithCache(dsIndexProvider, cacheTTL)
// Wrap library element provider with caching as well
m.leIndexProvider = schemaversion.WrapLibraryElementProviderWithCache(leIndexProvider, cacheTTL)
m.migrations = schemaversion.GetMigrations(m.dsIndexProvider, m.leIndexProvider)
close(m.ready)
})
}

View File

@@ -10,10 +10,13 @@ import (
"path/filepath"
"strconv"
"strings"
"sync/atomic"
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apiserver/pkg/endpoints/request"
"github.com/grafana/grafana/apps/dashboard/pkg/migration/schemaversion"
migrationtestutil "github.com/grafana/grafana/apps/dashboard/pkg/migration/testutil"
@@ -31,7 +34,7 @@ func TestMigrate(t *testing.T) {
ResetForTesting()
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
Initialize(dsProvider, leProvider)
Initialize(dsProvider, leProvider, DefaultCacheTTL)
t.Run("minimum version check", func(t *testing.T) {
err := Migrate(context.Background(), map[string]interface{}{
@@ -49,7 +52,7 @@ func TestMigrateSingleVersion(t *testing.T) {
// Use the same datasource provider as the frontend test to ensure consistency
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
Initialize(dsProvider, leProvider)
Initialize(dsProvider, leProvider, DefaultCacheTTL)
runSingleVersionMigrationTests(t, SINGLE_VERSION_OUTPUT_DIR)
}
@@ -218,7 +221,7 @@ func TestSchemaMigrationMetrics(t *testing.T) {
// Initialize migration with test providers
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
Initialize(dsProvider, leProvider)
Initialize(dsProvider, leProvider, DefaultCacheTTL)
// Create a test registry for metrics
registry := prometheus.NewRegistry()
@@ -304,7 +307,7 @@ func TestSchemaMigrationMetrics(t *testing.T) {
func TestSchemaMigrationLogging(t *testing.T) {
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.StandardTestConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
Initialize(dsProvider, leProvider)
Initialize(dsProvider, leProvider, DefaultCacheTTL)
tests := []struct {
name string
@@ -423,7 +426,7 @@ func TestMigrateDevDashboards(t *testing.T) {
ResetForTesting()
dsProvider := migrationtestutil.NewDataSourceProvider(migrationtestutil.DevDashboardConfig)
leProvider := migrationtestutil.NewLibraryElementProvider()
Initialize(dsProvider, leProvider)
Initialize(dsProvider, leProvider, DefaultCacheTTL)
runDevDashboardMigrationTests(t, schemaversion.LATEST_VERSION, DEV_DASHBOARDS_OUTPUT_DIR)
}
@@ -449,3 +452,232 @@ func runDevDashboardMigrationTests(t *testing.T, targetVersion int, outputDir st
})
}
}
// TestMigrateWithCache verifies that Migrate routes datasource lookups through the
// org-aware provider cache: repeated migrations in the same namespace invoke the
// underlying provider once, while different namespaces build separate cache entries.
func TestMigrateWithCache(t *testing.T) {
	// Reset the migration singleton before each test
	ResetForTesting()

	datasources := []schemaversion.DataSourceInfo{
		{UID: "ds-uid-1", Type: "prometheus", Name: "Prometheus", Default: true, APIVersion: "v1"},
		{UID: "ds-uid-2", Type: "loki", Name: "Loki", Default: false, APIVersion: "v1"},
		{UID: "ds-uid-3", Type: "prometheus", Name: "Prometheus 2", Default: false, APIVersion: "v1"},
	}

	// Create a dashboard at schema version 32 for V33 and V36 migration with datasource references
	dashboard1 := map[string]interface{}{
		"schemaVersion": 32,
		"title":         "Test Dashboard 1",
		"panels": []interface{}{
			map[string]interface{}{
				"id":   1,
				"type": "timeseries",
				// String datasource that V33 will migrate to object reference
				"datasource": "Prometheus",
				"targets": []interface{}{
					map[string]interface{}{
						"refId":      "A",
						"datasource": "Loki",
					},
				},
			},
		},
	}

	// Create a dashboard at schema version 35 for testing V36 migration with datasource references in annotations
	dashboard2 := map[string]interface{}{
		"schemaVersion": 35,
		"title":         "Test Dashboard 2",
		"annotations": map[string]interface{}{
			"list": []interface{}{
				map[string]interface{}{
					"name":       "Test Annotation",
					"datasource": "Prometheus 2", // String reference that V36 should convert
					"enable":     true,
				},
			},
		},
	}

	t.Run("with datasources", func(t *testing.T) {
		ResetForTesting()
		dsProvider := newCountingProvider(datasources)
		leProvider := newCountingLibraryProvider(nil)

		// Initialize the migration system with our counting providers
		Initialize(dsProvider, leProvider, DefaultCacheTTL)

		// Verify initial call count is zero
		assert.Equal(t, dsProvider.GetCallCount(), int64(0))

		// Create a context with namespace (required for caching)
		ctx := request.WithNamespace(context.Background(), "default")

		// First migration - should invoke the provider once to build the cache
		dash1 := deepCopyDashboard(dashboard1)
		err := Migrate(ctx, dash1, schemaversion.LATEST_VERSION)
		require.NoError(t, err)
		assert.Equal(t, int64(1), dsProvider.GetCallCount())

		// Verify datasource conversion from string to object reference
		panels := dash1["panels"].([]interface{})
		panel := panels[0].(map[string]interface{})
		panelDS, ok := panel["datasource"].(map[string]interface{})
		require.True(t, ok, "panel datasource should be converted to object")
		assert.Equal(t, "ds-uid-1", panelDS["uid"])
		assert.Equal(t, "prometheus", panelDS["type"])

		// Verify target datasource conversion
		targets := panel["targets"].([]interface{})
		target := targets[0].(map[string]interface{})
		targetDS, ok := target["datasource"].(map[string]interface{})
		require.True(t, ok, "target datasource should be converted to object")
		assert.Equal(t, "ds-uid-2", targetDS["uid"])
		assert.Equal(t, "loki", targetDS["type"])

		// Migration with V35 dashboard - should use the cached index from first migration,
		// so the provider call count must stay at 1.
		dash2 := deepCopyDashboard(dashboard2)
		err = Migrate(ctx, dash2, schemaversion.LATEST_VERSION)
		require.NoError(t, err, "second migration should succeed")
		assert.Equal(t, int64(1), dsProvider.GetCallCount())

		// Verify the annotation datasource was converted to object reference
		annotations := dash2["annotations"].(map[string]interface{})
		list := annotations["list"].([]interface{})
		var testAnnotation map[string]interface{}
		for _, a := range list {
			ann := a.(map[string]interface{})
			if ann["name"] == "Test Annotation" {
				testAnnotation = ann
				break
			}
		}
		require.NotNil(t, testAnnotation, "Test Annotation should exist")
		annotationDS, ok := testAnnotation["datasource"].(map[string]interface{})
		require.True(t, ok, "annotation datasource should be converted to object")
		assert.Equal(t, "ds-uid-3", annotationDS["uid"])
		assert.Equal(t, "prometheus", annotationDS["type"])
	})

	// tests that cache isolates data per namespace
	t.Run("with multiple orgs", func(t *testing.T) {
		// Reset the migration singleton
		ResetForTesting()
		dsProvider := newCountingProvider(datasources)
		leProvider := newCountingLibraryProvider(nil)
		Initialize(dsProvider, leProvider, DefaultCacheTTL)

		// Create contexts for different orgs with proper namespace format (org-ID)
		ctx1 := request.WithNamespace(context.Background(), "default")  // org 1
		ctx2 := request.WithNamespace(context.Background(), "stacks-2") // stack 2

		// Migrate for org 1
		err := Migrate(ctx1, deepCopyDashboard(dashboard1), schemaversion.LATEST_VERSION)
		require.NoError(t, err)
		callsAfterOrg1 := dsProvider.GetCallCount()

		// Migrate for org 2 - should build separate cache
		err = Migrate(ctx2, deepCopyDashboard(dashboard2), schemaversion.LATEST_VERSION)
		require.NoError(t, err)
		callsAfterOrg2 := dsProvider.GetCallCount()
		assert.Greater(t, callsAfterOrg2, callsAfterOrg1,
			"org 2 migration should have called provider (separate cache)")

		// Migrate again for org 1 - should use cache
		err = Migrate(ctx1, deepCopyDashboard(dashboard1), schemaversion.LATEST_VERSION)
		require.NoError(t, err)
		callsAfterOrg1Again := dsProvider.GetCallCount()
		assert.Equal(t, callsAfterOrg2, callsAfterOrg1Again,
			"second org 1 migration should use cache")

		// Migrate again for org 2 - should use cache
		err = Migrate(ctx2, deepCopyDashboard(dashboard1), schemaversion.LATEST_VERSION)
		require.NoError(t, err)
		callsAfterOrg2Again := dsProvider.GetCallCount()
		assert.Equal(t, callsAfterOrg2, callsAfterOrg2Again,
			"second org 2 migration should use cache")
	})
}
// countingProvider wraps a datasource provider and counts calls to Index()
type countingProvider struct {
	datasources []schemaversion.DataSourceInfo
	// callCount is atomic so tests can read it while migrations run concurrently.
	callCount atomic.Int64
}

// newCountingProvider returns a provider that always serves the given datasources.
func newCountingProvider(datasources []schemaversion.DataSourceInfo) *countingProvider {
	return &countingProvider{
		datasources: datasources,
	}
}

// Index builds a fresh index on every call and bumps the call counter, letting
// tests assert how often the cache falls through to the underlying provider.
func (p *countingProvider) Index(_ context.Context) *schemaversion.DatasourceIndex {
	p.callCount.Add(1)
	return schemaversion.NewDatasourceIndex(p.datasources)
}

// GetCallCount returns the number of Index invocations observed so far.
func (p *countingProvider) GetCallCount() int64 {
	return p.callCount.Load()
}

// countingLibraryProvider wraps a library element provider and counts calls
type countingLibraryProvider struct {
	elements  []schemaversion.LibraryElementInfo
	callCount atomic.Int64
}

// newCountingLibraryProvider returns a provider that always serves the given elements.
func newCountingLibraryProvider(elements []schemaversion.LibraryElementInfo) *countingLibraryProvider {
	return &countingLibraryProvider{
		elements: elements,
	}
}

// GetLibraryElementInfo returns the fixed element list and bumps the call counter.
func (p *countingLibraryProvider) GetLibraryElementInfo(_ context.Context) []schemaversion.LibraryElementInfo {
	p.callCount.Add(1)
	return p.elements
}

// GetCallCount returns the number of GetLibraryElementInfo invocations observed so far.
func (p *countingLibraryProvider) GetCallCount() int64 {
	return p.callCount.Load()
}
// deepCopyDashboard creates a deep copy of a dashboard map
func deepCopyDashboard(dash map[string]interface{}) map[string]interface{} {
cpy := make(map[string]interface{})
for k, v := range dash {
switch val := v.(type) {
case []interface{}:
cpy[k] = deepCopySlice(val)
case map[string]interface{}:
cpy[k] = deepCopyMapForCache(val)
default:
cpy[k] = v
}
}
return cpy
}
func deepCopySlice(s []interface{}) []interface{} {
cpy := make([]interface{}, len(s))
for i, v := range s {
switch val := v.(type) {
case []interface{}:
cpy[i] = deepCopySlice(val)
case map[string]interface{}:
cpy[i] = deepCopyMapForCache(val)
default:
cpy[i] = v
}
}
return cpy
}
func deepCopyMapForCache(m map[string]interface{}) map[string]interface{} {
cpy := make(map[string]interface{})
for k, v := range m {
switch val := v.(type) {
case []interface{}:
cpy[k] = deepCopySlice(val)
case map[string]interface{}:
cpy[k] = deepCopyMapForCache(val)
default:
cpy[k] = v
}
}
return cpy
}

View File

@@ -0,0 +1,104 @@
package schemaversion
import (
"context"
"sync"
"time"
"github.com/grafana/authlib/types"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/hashicorp/golang-lru/v2/expirable"
k8srequest "k8s.io/apiserver/pkg/endpoints/request"
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
)
// defaultCacheSize caps the number of namespace entries kept in each LRU cache.
const defaultCacheSize = 1000

// CacheProvider is a generic cache interface for schema version providers.
type CacheProvider[T any] interface {
	// Get returns the cached value if it's still valid, otherwise calls fetch and caches the result.
	Get(ctx context.Context) T
}

// PreloadableCache is an interface for providers that support preloading the cache.
type PreloadableCache interface {
	// Preload loads data into the cache for the given namespaces.
	Preload(ctx context.Context, nsInfos []types.NamespaceInfo)
}

// cachedProvider is a thread-safe TTL cache that wraps any fetch function.
// Entries are keyed by the request namespace taken from the context.
type cachedProvider[T any] struct {
	fetch    func(context.Context) T
	cache    *expirable.LRU[string, T] // LRU cache: namespace to cache entry
	inFlight sync.Map                  // map[string]*sync.Mutex - per-namespace fetch locks
	logger   log.Logger
}

// newCachedProvider creates a new cachedProvider.
// The fetch function should be able to handle context with different namespaces.
// A non-positive size turns LRU mechanism off (cache of unlimited size).
// A non-positive cacheTTL disables TTL expiration.
func newCachedProvider[T any](fetch func(context.Context) T, size int, cacheTTL time.Duration, logger log.Logger) *cachedProvider[T] {
	cacheProvider := &cachedProvider[T]{
		fetch:  fetch,
		logger: logger,
	}
	// The eviction callback also drops the namespace's in-flight lock, so the
	// inFlight map cannot grow without bound as cache entries expire.
	cacheProvider.cache = expirable.NewLRU(size, func(key string, value T) {
		cacheProvider.inFlight.Delete(key)
	}, cacheTTL)
	return cacheProvider
}
// Get returns the cached value if it's still valid, otherwise calls fetch and caches the result.
// Concurrency: double-checked locking with one mutex per namespace, so a cache
// miss blocks only callers of the same namespace while other namespaces proceed.
func (p *cachedProvider[T]) Get(ctx context.Context) T {
	// Get namespace info from ctx
	nsInfo, err := request.NamespaceInfoFrom(ctx, true)
	if err != nil {
		// No namespace, fall back to direct fetch call without caching
		p.logger.Warn("Unable to get namespace info from context, skipping cache", "error", err)
		return p.fetch(ctx)
	}
	namespace := nsInfo.Value

	// Fast path: check if cache is still valid
	if entry, ok := p.cache.Get(namespace); ok {
		return entry
	}

	// Get or create a per-namespace lock for this fetch operation
	// This ensures only one fetch happens per namespace at a time
	lockInterface, _ := p.inFlight.LoadOrStore(namespace, &sync.Mutex{})
	nsMutex := lockInterface.(*sync.Mutex)

	// Lock this specific namespace - other namespaces can still proceed
	nsMutex.Lock()
	defer nsMutex.Unlock()

	// Double-check: another goroutine might have already fetched while we waited
	if entry, ok := p.cache.Get(namespace); ok {
		return entry
	}

	// Fetch outside the main lock - only this namespace is blocked
	p.logger.Debug("cache miss or expired, fetching new value", "namespace", namespace)
	value := p.fetch(ctx)

	// Update the cache for this namespace
	p.cache.Add(namespace, value)
	return value
}
// Preload loads data into the cache for the given namespaces.
// Namespaces are fetched sequentially, each with a namespace-scoped context,
// and the results are seeded directly into the LRU.
func (p *cachedProvider[T]) Preload(ctx context.Context, nsInfos []types.NamespaceInfo) {
	p.logger.Info("preloading cache", "nsInfos", len(nsInfos))
	begin := time.Now()
	defer func() {
		p.logger.Info("finished preloading cache", "nsInfos", len(nsInfos), "elapsed", time.Since(begin))
	}()

	for _, info := range nsInfos {
		nsCtx := k8srequest.WithNamespace(ctx, info.Value)
		p.cache.Add(info.Value, p.fetch(nsCtx))
	}
}

View File

@@ -0,0 +1,478 @@
package schemaversion
import (
"context"
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
authlib "github.com/grafana/authlib/types"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apiserver/pkg/endpoints/request"
)
// testProvider tracks how many times get() is called
type testProvider struct {
	testData  any
	callCount atomic.Int64
}

// newTestProvider returns a provider that always serves testData.
func newTestProvider(testData any) *testProvider {
	return &testProvider{
		testData: testData,
	}
}

// get returns the canned data and bumps the call counter; its signature matches
// the fetch function expected by newCachedProvider.
func (p *testProvider) get(_ context.Context) any {
	p.callCount.Add(1)
	return p.testData
}

// getCallCount reports how many times get was invoked.
func (p *testProvider) getCallCount() int64 {
	return p.callCount.Load()
}
// TestCachedProvider_CacheHit verifies that a second Get for the same namespace
// is served from the cache without re-invoking the underlying provider.
func TestCachedProvider_CacheHit(t *testing.T) {
	datasources := []DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
		{UID: "ds2", Type: "loki", Name: "Loki"},
	}
	underlying := newTestProvider(datasources)

	// Test newCachedProvider directly instead of the wrapper
	cached := newCachedProvider(underlying.get, defaultCacheSize, time.Minute, log.New("test"))

	// Use "default" namespace (org 1) - this is the standard Grafana namespace format
	ctx := request.WithNamespace(context.Background(), "default")

	// First call should hit the underlying provider
	idx1 := cached.Get(ctx)
	require.NotNil(t, idx1)
	assert.Equal(t, int64(1), underlying.getCallCount(), "first call should invoke underlying provider")

	// Second call should use cache
	idx2 := cached.Get(ctx)
	require.NotNil(t, idx2)
	assert.Equal(t, int64(1), underlying.getCallCount(), "second call should use cache, not invoke underlying provider")

	// Both should return the same data
	assert.Equal(t, idx1, idx2)
}

// TestCachedProvider_NamespaceIsolation verifies each namespace gets its own
// cache entry: a new namespace triggers a fetch, repeat calls are cache hits.
func TestCachedProvider_NamespaceIsolation(t *testing.T) {
	datasources := []DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	underlying := newTestProvider(datasources)
	cached := newCachedProvider(underlying.get, defaultCacheSize, time.Minute, log.New("test"))

	// Use "default" (org 1) and "org-2" (org 2) - standard Grafana namespace formats
	ctx1 := request.WithNamespace(context.Background(), "default")
	ctx2 := request.WithNamespace(context.Background(), "org-2")

	// First call for org 1
	idx1 := cached.Get(ctx1)
	require.NotNil(t, idx1)
	assert.Equal(t, int64(1), underlying.getCallCount(), "first org-1 call should invoke underlying provider")

	// Call for org 2 should also invoke underlying provider (different namespace)
	idx2 := cached.Get(ctx2)
	require.NotNil(t, idx2)
	assert.Equal(t, int64(2), underlying.getCallCount(), "org-2 call should invoke underlying provider (separate cache)")

	// Second call for org 1 should use cache
	idx3 := cached.Get(ctx1)
	require.NotNil(t, idx3)
	assert.Equal(t, int64(2), underlying.getCallCount(), "second org-1 call should use cache")

	// Second call for org 2 should use cache
	idx4 := cached.Get(ctx2)
	require.NotNil(t, idx4)
	assert.Equal(t, int64(2), underlying.getCallCount(), "second org-2 call should use cache")
}

// TestCachedProvider_NoNamespaceFallback verifies that when the context carries
// no namespace, every Get falls through to the underlying provider (no caching).
func TestCachedProvider_NoNamespaceFallback(t *testing.T) {
	datasources := []DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	underlying := newTestProvider(datasources)
	cached := newCachedProvider(underlying.get, defaultCacheSize, time.Minute, log.New("test"))

	// Context without namespace - should fall back to direct provider call
	ctx := context.Background()

	idx1 := cached.Get(ctx)
	require.NotNil(t, idx1)
	assert.Equal(t, int64(1), underlying.getCallCount())

	// Second call without namespace should also invoke underlying (no caching for unknown namespace)
	idx2 := cached.Get(ctx)
	require.NotNil(t, idx2)
	assert.Equal(t, int64(2), underlying.getCallCount(), "without namespace, each call should invoke underlying provider")
}
// TestCachedProvider_ConcurrentAccess hammers a single namespace from many
// goroutines and expects the per-namespace double-checked locking to collapse
// the underlying fetches to (almost) one.
func TestCachedProvider_ConcurrentAccess(t *testing.T) {
	datasources := []DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	underlying := newTestProvider(datasources)
	cached := newCachedProvider(underlying.get, defaultCacheSize, time.Minute, log.New("test"))

	// Use "default" namespace (org 1)
	ctx := request.WithNamespace(context.Background(), "default")

	var wg sync.WaitGroup
	numGoroutines := 100

	// Launch many goroutines that all try to access the cache simultaneously
	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			idx := cached.Get(ctx)
			require.NotNil(t, idx)
		}()
	}
	wg.Wait()

	// Due to double-check locking, only 1 goroutine should have actually built the cache
	// In practice, there might be a few more due to timing, but it should be much less than numGoroutines
	callCount := underlying.getCallCount()
	assert.LessOrEqual(t, callCount, int64(5), "with proper locking, very few goroutines should invoke underlying provider; got %d", callCount)
}

// TestCachedProvider_ConcurrentNamespaces runs concurrent Gets across several
// namespaces and expects roughly one underlying fetch per namespace.
func TestCachedProvider_ConcurrentNamespaces(t *testing.T) {
	datasources := []DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	underlying := newTestProvider(datasources)
	cached := newCachedProvider(underlying.get, defaultCacheSize, time.Minute, log.New("test"))

	var wg sync.WaitGroup
	numOrgs := 10
	callsPerOrg := 20

	// Launch goroutines for multiple namespaces
	// Use valid namespace formats: "default" for org 1, "org-N" for N > 1
	namespaces := make([]string, numOrgs)
	namespaces[0] = "default"
	for i := 1; i < numOrgs; i++ {
		namespaces[i] = fmt.Sprintf("org-%d", i+1)
	}

	for _, ns := range namespaces {
		ctx := request.WithNamespace(context.Background(), ns)
		for i := 0; i < callsPerOrg; i++ {
			wg.Add(1)
			go func(ctx context.Context) {
				defer wg.Done()
				idx := cached.Get(ctx)
				require.NotNil(t, idx)
			}(ctx)
		}
	}
	wg.Wait()

	// Each org should have at most a few calls (ideally 1, but timing can cause a few more)
	callCount := underlying.getCallCount()
	// With 10 orgs, we expect around 10 calls (one per org)
	assert.LessOrEqual(t, callCount, int64(numOrgs), "expected roughly one call per org, got %d calls for %d orgs", callCount, numOrgs)
}
// Test that cache returns correct data for each namespace
func TestCachedProvider_CorrectDataPerNamespace(t *testing.T) {
	// Provider that returns different data based on namespace
	underlying := &namespaceAwareProvider{
		datasourcesByNamespace: map[string][]DataSourceInfo{
			"default": {{UID: "org1-ds", Type: "prometheus", Name: "Org1 DS", Default: true}},
			"org-2":   {{UID: "org2-ds", Type: "loki", Name: "Org2 DS", Default: true}},
		},
	}
	cached := newCachedProvider(underlying.Index, defaultCacheSize, time.Minute, log.New("test"))

	// Use valid namespace formats
	ctx1 := request.WithNamespace(context.Background(), "default")
	ctx2 := request.WithNamespace(context.Background(), "org-2")

	idx1 := cached.Get(ctx1)
	idx2 := cached.Get(ctx2)

	assert.Equal(t, "org1-ds", idx1.GetDefault().UID, "org 1 should get org-1 datasources")
	assert.Equal(t, "org2-ds", idx2.GetDefault().UID, "org 2 should get org-2 datasources")

	// Subsequent calls should still return correct data
	idx1Again := cached.Get(ctx1)
	idx2Again := cached.Get(ctx2)
	assert.Equal(t, "org1-ds", idx1Again.GetDefault().UID, "org 1 should still get org-1 datasources from cache")
	assert.Equal(t, "org2-ds", idx2Again.GetDefault().UID, "org 2 should still get org-2 datasources from cache")
}

// TestCachedProvider_PreloadMultipleNamespaces verifies preloading multiple namespaces
func TestCachedProvider_PreloadMultipleNamespaces(t *testing.T) {
	// Provider that returns different data based on namespace
	underlying := &namespaceAwareProvider{
		datasourcesByNamespace: map[string][]DataSourceInfo{
			"default": {{UID: "org1-ds", Type: "prometheus", Name: "Org1 DS", Default: true}},
			"org-2":   {{UID: "org2-ds", Type: "loki", Name: "Org2 DS", Default: true}},
			"org-3":   {{UID: "org3-ds", Type: "tempo", Name: "Org3 DS", Default: true}},
		},
	}
	cached := newCachedProvider(underlying.Index, defaultCacheSize, time.Minute, log.New("test"))

	// Preload multiple namespaces
	nsInfos := []authlib.NamespaceInfo{
		createNamespaceInfo(1, 0, "default"),
		createNamespaceInfo(2, 0, "org-2"),
		createNamespaceInfo(3, 0, "org-3"),
	}
	cached.Preload(context.Background(), nsInfos)

	// After preload, the underlying provider should have been called once per namespace
	assert.Equal(t, 3, underlying.callCount, "preload should call underlying provider once per namespace")

	// Access all namespaces - should use preloaded data and get correct data per namespace
	expectedUIDs := map[string]string{
		"default": "org1-ds",
		"org-2":   "org2-ds",
		"org-3":   "org3-ds",
	}
	for _, ns := range []string{"default", "org-2", "org-3"} {
		ctx := request.WithNamespace(context.Background(), ns)
		idx := cached.Get(ctx)
		require.NotNil(t, idx, "index for namespace %s should not be nil", ns)
		assert.Equal(t, expectedUIDs[ns], idx.GetDefault().UID, "namespace %s should get correct datasource", ns)
	}

	// The underlying provider should still have been called only 3 times (from preload)
	assert.Equal(t, 3, underlying.callCount,
		"access after preload should use cached data for all namespaces")
}

// namespaceAwareProvider returns different datasources based on namespace
type namespaceAwareProvider struct {
	datasourcesByNamespace map[string][]DataSourceInfo
	// callCount is a plain int: the tests above only call Index sequentially.
	// NOTE(review): not safe for concurrent use - switch to atomic.Int64 if this
	// helper is ever reused in a parallel test.
	callCount int
}

// Index returns the canned datasources for the context's namespace (an empty
// index for unknown namespaces) and bumps the call counter.
func (p *namespaceAwareProvider) Index(ctx context.Context) *DatasourceIndex {
	p.callCount++
	ns := request.NamespaceValue(ctx)
	if ds, ok := p.datasourcesByNamespace[ns]; ok {
		return NewDatasourceIndex(ds)
	}
	return NewDatasourceIndex(nil)
}

// createNamespaceInfo creates a NamespaceInfo for testing
func createNamespaceInfo(orgID, stackID int64, value string) authlib.NamespaceInfo {
	return authlib.NamespaceInfo{
		OrgID:   orgID,
		StackID: stackID,
		Value:   value,
	}
}
// Test DatasourceIndex functionality: lookup by name, by UID, unknown keys,
// and the default-datasource accessor.
func TestDatasourceIndex_Lookup(t *testing.T) {
	datasources := []DataSourceInfo{
		{UID: "ds-uid-1", Type: "prometheus", Name: "Prometheus DS", Default: true, APIVersion: "v1"},
		{UID: "ds-uid-2", Type: "loki", Name: "Loki DS", Default: false, APIVersion: "v1"},
	}
	idx := NewDatasourceIndex(datasources)

	t.Run("lookup by name", func(t *testing.T) {
		ds := idx.Lookup("Prometheus DS")
		require.NotNil(t, ds)
		assert.Equal(t, "ds-uid-1", ds.UID)
	})

	t.Run("lookup by UID", func(t *testing.T) {
		ds := idx.Lookup("ds-uid-2")
		require.NotNil(t, ds)
		assert.Equal(t, "Loki DS", ds.Name)
	})

	t.Run("lookup unknown returns nil", func(t *testing.T) {
		ds := idx.Lookup("unknown")
		assert.Nil(t, ds)
	})

	t.Run("get default", func(t *testing.T) {
		ds := idx.GetDefault()
		require.NotNil(t, ds)
		assert.Equal(t, "ds-uid-1", ds.UID)
	})

	t.Run("lookup by UID directly", func(t *testing.T) {
		ds := idx.LookupByUID("ds-uid-1")
		require.NotNil(t, ds)
		assert.Equal(t, "Prometheus DS", ds.Name)
	})

	t.Run("lookup by name directly", func(t *testing.T) {
		ds := idx.LookupByName("Loki DS")
		require.NotNil(t, ds)
		assert.Equal(t, "ds-uid-2", ds.UID)
	})
}

// TestDatasourceIndex_EmptyIndex verifies every accessor returns nil on an
// index built from no datasources.
func TestDatasourceIndex_EmptyIndex(t *testing.T) {
	idx := NewDatasourceIndex(nil)
	assert.Nil(t, idx.GetDefault())
	assert.Nil(t, idx.Lookup("anything"))
	assert.Nil(t, idx.LookupByUID("anything"))
	assert.Nil(t, idx.LookupByName("anything"))
}
// TestCachedProvider_TTLExpiration verifies that cache expires after TTL
func TestCachedProvider_TTLExpiration(t *testing.T) {
	datasources := []DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	underlying := newTestProvider(datasources)

	// Use a very short TTL for testing
	shortTTL := 50 * time.Millisecond
	cached := newCachedProvider(underlying.get, defaultCacheSize, shortTTL, log.New("test"))

	ctx := request.WithNamespace(context.Background(), "default")

	// First call - should call underlying provider
	idx1 := cached.Get(ctx)
	require.NotNil(t, idx1)
	assert.Equal(t, int64(1), underlying.getCallCount(), "first call should invoke underlying provider")

	// Second call immediately - should use cache
	idx2 := cached.Get(ctx)
	require.NotNil(t, idx2)
	assert.Equal(t, int64(1), underlying.getCallCount(), "second call should use cache")

	// Wait for TTL to expire (extra 20ms of headroom to avoid timing flakes)
	time.Sleep(shortTTL + 20*time.Millisecond)

	// Third call after TTL - should call underlying provider again
	idx3 := cached.Get(ctx)
	require.NotNil(t, idx3)
	assert.Equal(t, int64(2), underlying.getCallCount(),
		"after TTL expiration, underlying provider should be called again")
}
// TestCachedProvider_ParallelNamespacesFetch verifies that different namespaces can fetch in parallel
func TestCachedProvider_ParallelNamespacesFetch(t *testing.T) {
// Create a blocking provider that tracks concurrent executions
provider := &blockingProvider{
blockDuration: 100 * time.Millisecond,
datasources: []DataSourceInfo{
{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
},
}
cached := newCachedProvider(provider.get, defaultCacheSize, time.Minute, log.New("test"))
numNamespaces := 5
var wg sync.WaitGroup
// Launch fetches for different namespaces simultaneously
startTime := time.Now()
for i := 0; i < numNamespaces; i++ {
wg.Add(1)
namespace := fmt.Sprintf("org-%d", i+1)
go func(ns string) {
defer wg.Done()
ctx := request.WithNamespace(context.Background(), ns)
idx := cached.Get(ctx)
require.NotNil(t, idx)
}(namespace)
}
wg.Wait()
elapsed := time.Since(startTime)
// Verify that all namespaces were called
assert.Equal(t, int64(numNamespaces), provider.callCount.Load())
// Verify max concurrent executions shows parallelism
maxConcurrent := provider.maxConcurrent.Load()
assert.Equal(t, int64(numNamespaces), maxConcurrent)
// If all namespaces had to wait sequentially, it would take numNamespaces * blockDuration
// With parallelism, it should be much faster (close to just blockDuration)
sequentialTime := time.Duration(numNamespaces) * provider.blockDuration
assert.Less(t, elapsed, sequentialTime)
}
// TestCachedProvider_SameNamespaceSerialFetch verifies that the same namespace doesn't fetch concurrently
func TestCachedProvider_SameNamespaceSerialFetch(t *testing.T) {
	// Create a blocking provider that tracks concurrent executions
	provider := &blockingProvider{
		blockDuration: 100 * time.Millisecond,
		datasources: []DataSourceInfo{
			{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
		},
	}
	cached := newCachedProvider(provider.get, defaultCacheSize, time.Minute, log.New("test"))
	numGoroutines := 10
	var wg sync.WaitGroup
	// Launch multiple fetches for the SAME namespace simultaneously
	ctx := request.WithNamespace(context.Background(), "default")
	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			idx := cached.Get(ctx)
			// Use assert (not require) in spawned goroutines: require.FailNow
			// calls runtime.Goexit, which must only run on the goroutine
			// executing the test function.
			assert.NotNil(t, idx)
		}()
	}
	wg.Wait()
	// Max concurrent should be 1 since all goroutines are for the same namespace
	maxConcurrent := provider.maxConcurrent.Load()
	assert.Equal(t, int64(1), maxConcurrent)
}
// blockingProvider is a test provider that simulates slow fetch operations
// and tracks concurrent executions
type blockingProvider struct {
	blockDuration time.Duration    // how long each get call sleeps, simulating a slow fetch
	datasources   []DataSourceInfo // canned payload returned from every get call
	callCount     atomic.Int64     // total number of get invocations
	currentActive atomic.Int64     // number of get calls currently in flight
	maxConcurrent atomic.Int64     // high-water mark of currentActive, maintained via CAS in get
}
// get simulates a slow provider fetch. It records the invocation, tracks how
// many fetches are in flight (and the observed peak), sleeps for blockDuration
// to emulate expensive work, and returns the canned datasource list.
func (p *blockingProvider) get(_ context.Context) any {
	p.callCount.Add(1)
	// One more fetch is now active; capture the new level.
	active := p.currentActive.Add(1)
	// Atomically raise the high-water mark if this level is a new peak.
	for {
		peak := p.maxConcurrent.Load()
		if active <= peak || p.maxConcurrent.CompareAndSwap(peak, active) {
			break
		}
	}
	// Simulate slow operation
	time.Sleep(p.blockDuration)
	p.currentActive.Add(-1)
	return p.datasources
}

View File

@@ -2,8 +2,9 @@ package schemaversion
import (
"context"
"sync"
"time"
"github.com/grafana/grafana/pkg/infra/log"
)
// Shared utility functions for datasource migrations across different schema versions.
@@ -11,65 +12,41 @@ import (
// string names/UIDs to structured reference objects with uid, type, and apiVersion.
// cachedIndexProvider wraps a DataSourceIndexProvider with time-based caching.
// This prevents multiple DB queries and index builds during operations that may call
// provider.Index() multiple times (e.g., dashboard conversions with many datasource lookups).
// The cache expires after 10 seconds, allowing it to be used as a long-lived singleton
// while still refreshing periodically.
//
// Thread-safe: Uses sync.RWMutex to guarantee safe concurrent access.
type cachedIndexProvider struct {
provider DataSourceIndexProvider
mu sync.RWMutex
index *DatasourceIndex
cachedAt time.Time
cacheTTL time.Duration
*cachedProvider[*DatasourceIndex]
}
// Index returns the cached index if it's still valid (< 10s old), otherwise rebuilds it.
// Uses RWMutex for efficient concurrent reads when cache is valid.
// Index returns the cached index if it's still valid (< TTL old), otherwise rebuilds it.
func (p *cachedIndexProvider) Index(ctx context.Context) *DatasourceIndex {
// Fast path: check if cache is still valid using read lock
p.mu.RLock()
if p.index != nil && time.Since(p.cachedAt) < p.cacheTTL {
idx := p.index
p.mu.RUnlock()
return idx
}
p.mu.RUnlock()
// Slow path: cache expired or not yet built, acquire write lock
p.mu.Lock()
defer p.mu.Unlock()
// Double-check: another goroutine might have refreshed the cache
// while we were waiting for the write lock
if p.index != nil && time.Since(p.cachedAt) < p.cacheTTL {
return p.index
}
// Rebuild the cache
p.index = p.provider.Index(ctx)
p.cachedAt = time.Now()
return p.index
return p.Get(ctx)
}
// WrapIndexProviderWithCache wraps a provider to cache the index with a 10-second TTL.
// Useful for conversions or migrations that may call provider.Index() multiple times.
// The cache expires after 10 seconds, making it suitable for use as a long-lived singleton
// at the top level of dependency injection while still refreshing periodically.
//
// Example usage in dashboard conversion:
//
// cachedDsIndexProvider := schemaversion.WrapIndexProviderWithCache(dsIndexProvider)
// // Now all calls to cachedDsIndexProvider.Index(ctx) return the same cached index
// // for up to 10 seconds before refreshing
func WrapIndexProviderWithCache(provider DataSourceIndexProvider) DataSourceIndexProvider {
if provider == nil {
return nil
// cachedLibraryElementProvider wraps a LibraryElementIndexProvider with time-based caching.
type cachedLibraryElementProvider struct {
*cachedProvider[[]LibraryElementInfo]
}
func (p *cachedLibraryElementProvider) GetLibraryElementInfo(ctx context.Context) []LibraryElementInfo {
return p.Get(ctx)
}
// WrapIndexProviderWithCache wraps a DataSourceIndexProvider to cache indexes with a configurable TTL.
func WrapIndexProviderWithCache(provider DataSourceIndexProvider, cacheTTL time.Duration) DataSourceIndexProvider {
if provider == nil || cacheTTL <= 0 {
return provider
}
return &cachedIndexProvider{
provider: provider,
cacheTTL: 10 * time.Second,
newCachedProvider[*DatasourceIndex](provider.Index, defaultCacheSize, cacheTTL, log.New("schemaversion.dsindexprovider")),
}
}
// WrapLibraryElementProviderWithCache wraps a LibraryElementIndexProvider to cache library elements with a configurable TTL.
func WrapLibraryElementProviderWithCache(provider LibraryElementIndexProvider, cacheTTL time.Duration) LibraryElementIndexProvider {
if provider == nil || cacheTTL <= 0 {
return provider
}
return &cachedLibraryElementProvider{
newCachedProvider[[]LibraryElementInfo](provider.GetLibraryElementInfo, defaultCacheSize, cacheTTL, log.New("schemaversion.leindexprovider")),
}
}

View File

@@ -35,7 +35,7 @@ require (
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // indirect
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect

View File

@@ -52,8 +52,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4=
github.com/grafana/grafana-app-sdk v0.48.2 h1:CQQDhwo1fWaXQVKvxxOcK6azbuY3E2TgJHNAZlYYn7U=

View File

@@ -46,7 +46,7 @@ replace github.com/prometheus/alertmanager => github.com/grafana/prometheus-aler
require (
github.com/grafana/grafana v0.0.0-00010101000000-000000000000
github.com/grafana/grafana-app-sdk v0.48.1
github.com/grafana/grafana-app-sdk v0.48.2
github.com/grafana/grafana-app-sdk/logging v0.48.1
github.com/grafana/grafana/apps/folder v0.0.0
github.com/grafana/grafana/pkg/apimachinery v0.0.0
@@ -231,12 +231,12 @@ require (
github.com/gorilla/mux v1.8.1 // indirect
github.com/grafana/alerting v0.0.0-20251009192429-9427c24835ae // indirect
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // indirect
github.com/grafana/dataplane/sdata v0.0.9 // indirect
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect
github.com/grafana/grafana-aws-sdk v1.3.0 // indirect
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 // indirect
github.com/grafana/grafana-plugin-sdk-go v0.281.0 // indirect
github.com/grafana/grafana-plugin-sdk-go v0.283.0 // indirect
github.com/grafana/grafana/apps/dashboard v0.0.0 // indirect
github.com/grafana/grafana/apps/plugins v0.0.0 // indirect
github.com/grafana/grafana/apps/provisioning v0.0.0 // indirect
@@ -251,7 +251,7 @@ require (
github.com/grafana/sqlds/v4 v4.2.7 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/hashicorp/consul/api v1.31.2 // indirect
@@ -349,7 +349,7 @@ require (
github.com/pressly/goose/v3 v3.25.0 // indirect
github.com/prometheus/alertmanager v0.28.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.1 // indirect
github.com/prometheus/common v0.67.2 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/exporter-toolkit v0.14.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
@@ -425,18 +425,18 @@ require (
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
gocloud.dev v0.43.0 // indirect
golang.org/x/crypto v0.43.0 // indirect
golang.org/x/crypto v0.44.0 // indirect
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.46.0 // indirect
golang.org/x/oauth2 v0.32.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect
golang.org/x/term v0.36.0 // indirect
golang.org/x/text v0.30.0 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.33.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/tools v0.39.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
gonum.org/v1/gonum v0.16.0 // indirect

View File

@@ -839,6 +839,8 @@ github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZ
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dataplane/examples v0.0.1 h1:K9M5glueWyLoL4//H+EtTQq16lXuHLmOhb6DjSCahzA=
github.com/grafana/dataplane/examples v0.0.1/go.mod h1:h5YwY8s407/17XF5/dS8XrUtsTVV2RnuW8+m1Mp46mg=
github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s=
@@ -853,6 +855,7 @@ github.com/grafana/gomemcache v0.0.0-20250828162811-a96f6acee2fe h1:q+QaVANzNZxv
github.com/grafana/gomemcache v0.0.0-20250828162811-a96f6acee2fe/go.mod h1:j/s0jkda4UXTemDs7Pgw/vMT06alWc42CHisvYac0qw=
github.com/grafana/grafana-app-sdk v0.48.1 h1:bKJadWH18WCpJ+Zk8AezRFXCcZgGredRv+fRS+8zkek=
github.com/grafana/grafana-app-sdk v0.48.1/go.mod h1:5LljCz+wvmGfkQ8ZKTOfserhtXNEF0cSFthoWShvN6c=
github.com/grafana/grafana-app-sdk v0.48.2/go.mod h1:LDOvQ7OOyHLcXdSa0InATCa5OMoYAd6E1+rGLrMgHuk=
github.com/grafana/grafana-app-sdk/logging v0.48.1 h1:veM0X5LAPyN3KsDLglWjIofndbGuf7MqnrDuDN+F/Ng=
github.com/grafana/grafana-app-sdk/logging v0.48.1/go.mod h1:Gh/nBWnspK3oDNWtiM5qUF/fardHzOIEez+SPI3JeHA=
github.com/grafana/grafana-aws-sdk v1.3.0 h1:/bfJzP93rCel1GbWoRSq0oUo424MZXt8jAp2BK9w8tM=
@@ -867,6 +870,7 @@ github.com/grafana/grafana-openapi-client-go v0.0.0-20231213163343-bd475d63fb79
github.com/grafana/grafana-openapi-client-go v0.0.0-20231213163343-bd475d63fb79/go.mod h1:wc6Hbh3K2TgCUSfBC/BOzabItujtHMESZeFk5ZhdxhQ=
github.com/grafana/grafana-plugin-sdk-go v0.281.0 h1:V8dGyatzcOLQeivFhBV2JWMwTSZH/clDnpfKG9p3dTA=
github.com/grafana/grafana-plugin-sdk-go v0.281.0/go.mod h1:3I0g+v6jAwVmrt6BEjDUP4V6pkhGP5QKY5NkXY4Ayr4=
github.com/grafana/grafana-plugin-sdk-go v0.283.0/go.mod h1:20qhoYxIgbZRmwCEO1KMP8q2yq/Kge5+xE/99/hLEk0=
github.com/grafana/grafana/apps/example v0.0.0-20251027162426-edef69fdc82b h1:6Bo65etvjQ4tStkaA5+N3A3ENbO4UAWj53TxF6g2Hdk=
github.com/grafana/grafana/apps/example v0.0.0-20251027162426-edef69fdc82b/go.mod h1:6+wASOCN8LWt6FJ8dc0oODUBIEY5XHaE6ABi8g0mR+k=
github.com/grafana/grafana/pkg/promlib v0.0.8 h1:VUWsqttdf0wMI4j9OX9oNrykguQpZcruudDAFpJJVw0=
@@ -901,6 +905,7 @@ github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 h1:QGLs
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0/go.mod h1:hM2alZsMUni80N33RBe6J0e423LB+odMj7d3EMP9l20=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4zG2vvqG6uWNkBHSTqXOZk0=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod h1:wd1YpapPLivG6nQgbf7ZkG1hhSOXDhhn4MLTknx2aAc=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3/go.mod h1:NbCUVmiS4foBGBHOYlCT25+YmGpJ32dZPi75pGEUpj4=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 h1:uGoIog/wiQHI9GAxXO5TJbT0wWKH3O9HhOJW1F9c3fY=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc=
@@ -1374,6 +1379,7 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI=
github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q=
github.com/prometheus/common v0.67.2/go.mod h1:63W3KZb1JOKgcjlIr64WW/LvFGAqKPj0atm+knVGEko=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
@@ -1730,6 +1736,7 @@ golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1773,6 +1780,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1829,6 +1837,7 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1853,6 +1862,7 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1867,6 +1877,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1961,12 +1972,15 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU=
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE=
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1978,6 +1992,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2046,6 +2061,7 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/tools/godoc v0.1.0-deprecated h1:o+aZ1BOj6Hsx/GBdJO/s815sqftjSnrZZwyYTHODvtk=
golang.org/x/tools/godoc v0.1.0-deprecated/go.mod h1:qM63CriJ961IHWmnWa9CjZnBndniPt4a3CK0PVB9bIg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2305,20 +2321,27 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw=
k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
k8s.io/apiextensions-apiserver v0.34.2/go.mod h1:398CJrsgXF1wytdaanynDpJ67zG4Xq7yj91GrmYN2SE=
k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA=
k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0=
k8s.io/apiserver v0.34.2/go.mod h1:gqJQy2yDOB50R3JUReHSFr+cwJnL8G1dzTA0YLEqAPI=
k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE=
k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A=
k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0=
k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.34.1 h1:iCFOvewDPzWM9fMTfyIPO+4MeuZ0tcZbugxLNSHFG4w=
k8s.io/kms v0.34.1/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM=
k8s.io/kms v0.34.2/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM=
k8s.io/kube-aggregator v0.34.1 h1:WNLV0dVNoFKmuyvdWLd92iDSyD/TSTjqwaPj0U9XAEU=
k8s.io/kube-aggregator v0.34.1/go.mod h1:RU8j+5ERfp0h+gIvWtxRPfsa5nK7rboDm8RST8BJfYQ=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=

View File

@@ -37,7 +37,7 @@ require (
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // indirect
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect

View File

@@ -52,8 +52,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4=
github.com/grafana/grafana-app-sdk v0.48.2 h1:CQQDhwo1fWaXQVKvxxOcK6azbuY3E2TgJHNAZlYYn7U=

View File

@@ -42,7 +42,7 @@ require (
github.com/google/go-github/v64 v64.0.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // indirect
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect
github.com/grafana/grafana-app-sdk v0.48.2 // indirect
github.com/josharian/intern v1.0.0 // indirect

View File

@@ -58,8 +58,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4=
github.com/grafana/grafana-app-sdk v0.48.2 h1:CQQDhwo1fWaXQVKvxxOcK6azbuY3E2TgJHNAZlYYn7U=

View File

@@ -28,6 +28,7 @@ type fileWatcher struct {
timers map[string]*time.Timer
watcher *fsnotify.Watcher
logger logging.Logger
closed bool
}
// File watcher that buffers events for 100ms before actually firing them
@@ -77,22 +78,21 @@ func NewFileWatcher(path string, accept func(string) bool) (FileWatcher, error)
// Keep watching for changes until the context is done
func (f *fileWatcher) Watch(ctx context.Context, events chan<- string) {
defer f.cleanup(events)
for {
select {
case <-ctx.Done():
close(events)
return
case _, ok := <-f.watcher.Errors:
if !ok { // Channel was closed (i.e. Watcher.Close() was called).
close(events)
return
}
// Read from Events.
case e, ok := <-f.watcher.Events:
if !ok { // Channel was closed (i.e. Watcher.Close() was called).
close(events)
return
}
name := filepath.Base(e.Name)
@@ -114,6 +114,11 @@ func (f *fileWatcher) Watch(ctx context.Context, events chan<- string) {
if !ok {
nameCopy := e.Name
t = time.AfterFunc(math.MaxInt64, func() {
// before sending the event, check if the watcher has been closed
if f.closed {
return
}
path, _ := strings.CutPrefix(nameCopy, f.prefix)
events <- path
@@ -128,3 +133,17 @@ func (f *fileWatcher) Watch(ctx context.Context, events chan<- string) {
}
}
}
// stop all pending timers and close the event channel
func (f *fileWatcher) cleanup(events chan<- string) {
f.timersMu.Lock()
defer f.timersMu.Unlock()
for _, timer := range f.timers {
timer.Stop()
}
f.timers = make(map[string]*time.Timer)
close(events)
f.closed = true
}

View File

@@ -35,7 +35,7 @@ require (
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // indirect
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect

View File

@@ -52,8 +52,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4=
github.com/grafana/grafana-app-sdk v0.48.2 h1:CQQDhwo1fWaXQVKvxxOcK6azbuY3E2TgJHNAZlYYn7U=

2
go.mod
View File

@@ -88,7 +88,7 @@ require (
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // @grafana/grafana-app-platform-squad
github.com/grafana/alerting v0.0.0-20251009192429-9427c24835ae // @grafana/alerting-backend
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // @grafana/identity-access-team
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // @grafana/identity-access-team
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // @grafana/identity-access-team
github.com/grafana/dataplane/examples v0.0.1 // @grafana/observability-metrics
github.com/grafana/dataplane/sdata v0.0.9 // @grafana/observability-metrics
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // @grafana/grafana-backend-group

4
go.sum
View File

@@ -1615,8 +1615,8 @@ github.com/grafana/alerting v0.0.0-20251009192429-9427c24835ae h1:NLPwY3tIP0lg0g
github.com/grafana/alerting v0.0.0-20251009192429-9427c24835ae/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dataplane/examples v0.0.1 h1:K9M5glueWyLoL4//H+EtTQq16lXuHLmOhb6DjSCahzA=
github.com/grafana/dataplane/examples v0.0.1/go.mod h1:h5YwY8s407/17XF5/dS8XrUtsTVV2RnuW8+m1Mp46mg=
github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s=

View File

@@ -4,7 +4,7 @@ go 1.25.3
require (
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // @grafana/identity-access-team
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // @grafana/identity-access-team
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // @grafana/identity-access-team
github.com/stretchr/testify v1.11.1
gopkg.in/yaml.v3 v3.0.1
k8s.io/apimachinery v0.34.2

View File

@@ -34,8 +34,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=

View File

@@ -4,7 +4,7 @@ go 1.25.3
require (
github.com/google/go-cmp v0.7.0
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4
github.com/grafana/grafana-app-sdk/logging v0.48.1
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250514132646-acbc7b54ed9e
github.com/prometheus/client_golang v1.23.2

View File

@@ -67,8 +67,8 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4=
github.com/grafana/grafana-app-sdk/logging v0.48.1 h1:veM0X5LAPyN3KsDLglWjIofndbGuf7MqnrDuDN+F/Ng=

View File

@@ -94,6 +94,42 @@ func TestDashboardQueries(t *testing.T) {
Order: "ASC",
}),
},
{
// Tests that MaxRows generates LIMIT clause for regular dashboard queries
Name: "dashboard_with_max_rows",
Data: getQuery(&DashboardQuery{
OrgID: 2,
MaxRows: 100,
}),
},
{
// Tests that MaxRows generates LIMIT clause for history queries
Name: "history_with_max_rows",
Data: getQuery(&DashboardQuery{
OrgID: 1,
GetHistory: true,
MaxRows: 50,
}),
},
{
// Tests that MaxRows + LastID generates correct pagination query
Name: "dashboard_with_max_rows_last_id",
Data: getQuery(&DashboardQuery{
OrgID: 2,
MaxRows: 100,
LastID: 500,
}),
},
{
// Tests that MaxRows + LastID generates correct pagination query
Name: "history_with_max_rows_last_id",
Data: getQuery(&DashboardQuery{
OrgID: 2,
MaxRows: 100,
GetHistory: true,
LastID: 500,
}),
},
},
sqlQueryPanels: {
{

View File

@@ -71,6 +71,9 @@ WHERE dashboard.is_folder = {{ .Arg .Query.GetFolders }}
COALESCE(dashboard_version.version, dashboard.version) {{ .Query.Order }},
{{ end }}
dashboard.uid ASC
{{ if .Query.MaxRows }}
LIMIT {{ .Arg .Query.MaxRows }}
{{ end }}
{{ else }}
{{ if .Query.UID }}
AND dashboard.uid = {{ .Arg .Query.UID }}
@@ -83,4 +86,7 @@ WHERE dashboard.is_folder = {{ .Arg .Query.GetFolders }}
AND dashboard.deleted IS NULL
{{ end }}
ORDER BY dashboard.id DESC
{{ if .Query.MaxRows }}
LIMIT {{ .Arg .Query.MaxRows }}
{{ end }}
{{ end }}

View File

@@ -241,6 +241,128 @@ func (r *rowsWrapper) Value() []byte {
return b
}
// batchingIterator wraps rowsWrapper to fetch data in batches.
// When the current batch is exhausted, Next() re-issues the underlying query
// starting after the last seen dashboard ID, so large listings avoid a single
// slow unbounded query.
type batchingIterator struct {
	wrapper   *rowsWrapper                    // current batch; swapped out by nextBatch()
	a         *dashboardSqlAccess             // access layer used to run each batch query
	ctx       context.Context                 // request context reused for every batch fetch
	helper    *legacysql.LegacyDatabaseHelper // database helper passed through to getRows
	query     *DashboardQuery                 // query template; its LastID is mutated between batches
	batchSize int                             // rows requested per batch (taken from query.MaxRows)
	done      bool                            // set once no further batches will be fetched
	err       error                           // iterator-level error (close/fetch failures), distinct from wrapper errors
}

// Compile-time assertion that batchingIterator implements resource.ListIterator.
var _ resource.ListIterator = (*batchingIterator)(nil)

// Error returns the iterator's own error first (set by Next on close/fetch
// failures), otherwise the current batch's error.
func (b *batchingIterator) Error() error {
	if b.err != nil {
		return b.err
	}
	return b.wrapper.Error()
}

// ContinueToken delegates to the current batch's continue token.
func (b *batchingIterator) ContinueToken() string {
	return b.wrapper.ContinueToken()
}

// ResourceVersion delegates to the current batch.
func (b *batchingIterator) ResourceVersion() int64 {
	return b.wrapper.ResourceVersion()
}

// Namespace delegates to the current batch.
func (b *batchingIterator) Namespace() string {
	return b.wrapper.Namespace()
}

// Name delegates to the current batch.
func (b *batchingIterator) Name() string {
	return b.wrapper.Name()
}

// Folder delegates to the current batch.
func (b *batchingIterator) Folder() string {
	return b.wrapper.Folder()
}

// Value delegates to the current batch.
func (b *batchingIterator) Value() []byte {
	return b.wrapper.Value()
}

// Close closes the current batch. Note: earlier batches are closed by Next()
// as they are exhausted, so only the live wrapper needs closing here.
func (b *batchingIterator) Close() error {
	return b.wrapper.Close()
}
// newBatchingIterator builds a batch-fetching iterator over dashboard rows and
// eagerly loads the first batch, so query failures surface to the caller
// immediately rather than on the first call to Next().
func newBatchingIterator(ctx context.Context, a *dashboardSqlAccess, helper *legacysql.LegacyDatabaseHelper, query *DashboardQuery) (*batchingIterator, error) {
	it := &batchingIterator{
		a:         a,
		ctx:       ctx,
		helper:    helper,
		query:     query,
		batchSize: query.MaxRows,
	}
	// Prime the iterator with the initial batch, starting after query.LastID.
	if err := it.nextBatch(query.LastID); err != nil {
		return nil, err
	}
	return it, nil
}
// nextBatch re-runs the query starting after lastID and installs the
// resulting rowsWrapper as the current batch.
func (b *batchingIterator) nextBatch(lastID int64) error {
	b.query.LastID = lastID
	rows, err := b.a.getRows(b.ctx, b.helper, b.query)
	if err != nil {
		return err
	}
	b.wrapper = rows
	return nil
}
// Next advances the iterator to the next row. It first consumes the current
// batch; once that batch is exhausted it closes it and — only if the batch
// came back full (count == batchSize, meaning more rows may exist) — fetches
// the next batch keyed on the last row's ID. Returns false when iteration is
// finished or an error occurred (callers should then check Error()).
func (b *batchingIterator) Next() bool {
	if b.done {
		return false
	}
	// Try to get next row from current batch
	if b.wrapper.Next() {
		return true
	}
	// Check for errors in current wrapper
	if b.Error() != nil {
		return false
	}
	// No more rows in current batch - close it
	if err := b.wrapper.Close(); err != nil {
		// Should not happen, but handle it
		b.err = err
		b.done = true
		return false
	}
	// Current batch exhausted - check if we got a full batch (might be more data)
	// NOTE(review): this reads wrapper.count after Close(); assumes rowsWrapper
	// keeps count valid after closing — confirm in rowsWrapper.
	if b.wrapper.count < b.batchSize {
		// Got fewer rows than batch size, so we're done
		b.done = true
		return false
	}
	// Fetch next batch with LastID from last row
	// NOTE(review): likewise assumes wrapper.row (the last decoded row) and its
	// token survive Close() — confirm in rowsWrapper.
	if err := b.nextBatch(b.wrapper.row.token.id); err != nil {
		b.err = err
		b.done = true
		return false
	}
	// Try to get first row from new batch
	if b.wrapper.Next() {
		return true
	}
	// New batch is empty, we're done
	b.done = true
	return false
}
func generateFallbackDashboard(data []byte, title, uid string) ([]byte, error) {
generatedDashboard := map[string]interface{}{
"editable": true,

View File

@@ -296,11 +296,19 @@ func (a *dashboardSqlAccess) ListIterator(ctx context.Context, req *resourcepb.L
return 0, fmt.Errorf("token and orgID mismatch")
}
// Default batch size for iterator - fetch rows in batches to avoid slow queries
const defaultMaxRows = 500
maxRows := defaultMaxRows
if req.Limit > 0 && req.Limit < int64(defaultMaxRows) {
maxRows = int(req.Limit)
}
query := &DashboardQuery{
OrgID: info.OrgID,
Limit: int(req.Limit),
LastID: token.id,
Labels: req.Options.Labels,
OrgID: info.OrgID,
Limit: int(req.Limit),
MaxRows: maxRows,
LastID: token.id,
Labels: req.Options.Labels,
}
sql, err := a.sql(ctx)
@@ -323,14 +331,15 @@ func (a *dashboardSqlAccess) ListIterator(ctx context.Context, req *resourcepb.L
return 0, err
}
listRV *= 1000 // Convert to microseconds
rows, err := a.getRows(ctx, sql, query)
if rows != nil {
iter, err := newBatchingIterator(ctx, a, sql, query)
if iter != nil {
defer func() {
_ = rows.Close()
_ = iter.Close()
}()
}
if err == nil {
err = cb(rows)
err = cb(iter)
}
return listRV, err
}

View File

@@ -0,0 +1,31 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard.updated,
updated_user.uid as updated_by,
dashboard.updated_by as updated_by_id,
dashboard.version,
'' as message,
dashboard.data,
dashboard.api_version
FROM `grafana`.`dashboard` as dashboard
LEFT OUTER JOIN `grafana`.`dashboard_provisioning` as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN `grafana`.`user` as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN `grafana`.`user` as updated_user ON dashboard.updated_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 2
AND dashboard.deleted IS NULL
ORDER BY dashboard.id DESC
LIMIT 100

View File

@@ -0,0 +1,32 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard.updated,
updated_user.uid as updated_by,
dashboard.updated_by as updated_by_id,
dashboard.version,
'' as message,
dashboard.data,
dashboard.api_version
FROM `grafana`.`dashboard` as dashboard
LEFT OUTER JOIN `grafana`.`dashboard_provisioning` as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN `grafana`.`user` as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN `grafana`.`user` as updated_user ON dashboard.updated_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 2
AND dashboard.id < 500
AND dashboard.deleted IS NULL
ORDER BY dashboard.id DESC
LIMIT 100

View File

@@ -0,0 +1,34 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard_version.created as updated,
updated_user.uid as updated_by,
dashboard_version.created_by as updated_by_id,
dashboard_version.version,
dashboard_version.message,
dashboard_version.data,
dashboard_version.api_version
FROM `grafana`.`dashboard` as dashboard
LEFT OUTER JOIN `grafana`.`dashboard_version` as dashboard_version ON dashboard.id = dashboard_version.dashboard_id
LEFT OUTER JOIN `grafana`.`dashboard_provisioning` as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN `grafana`.`user` as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN `grafana`.`user` as updated_user ON dashboard_version.created_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 1
ORDER BY
dashboard_version.created DESC,
dashboard_version.version DESC,
dashboard.uid ASC
LIMIT 50

View File

@@ -0,0 +1,35 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard_version.created as updated,
updated_user.uid as updated_by,
dashboard_version.created_by as updated_by_id,
dashboard_version.version,
dashboard_version.message,
dashboard_version.data,
dashboard_version.api_version
FROM `grafana`.`dashboard` as dashboard
LEFT OUTER JOIN `grafana`.`dashboard_version` as dashboard_version ON dashboard.id = dashboard_version.dashboard_id
LEFT OUTER JOIN `grafana`.`dashboard_provisioning` as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN `grafana`.`user` as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN `grafana`.`user` as updated_user ON dashboard_version.created_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 2
AND dashboard_version.version < 500
ORDER BY
dashboard_version.created DESC,
dashboard_version.version DESC,
dashboard.uid ASC
LIMIT 100

View File

@@ -0,0 +1,31 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard.updated,
updated_user.uid as updated_by,
dashboard.updated_by as updated_by_id,
dashboard.version,
'' as message,
dashboard.data,
dashboard.api_version
FROM "grafana"."dashboard" as dashboard
LEFT OUTER JOIN "grafana"."dashboard_provisioning" as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN "grafana"."user" as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN "grafana"."user" as updated_user ON dashboard.updated_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 2
AND dashboard.deleted IS NULL
ORDER BY dashboard.id DESC
LIMIT 100

View File

@@ -0,0 +1,32 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard.updated,
updated_user.uid as updated_by,
dashboard.updated_by as updated_by_id,
dashboard.version,
'' as message,
dashboard.data,
dashboard.api_version
FROM "grafana"."dashboard" as dashboard
LEFT OUTER JOIN "grafana"."dashboard_provisioning" as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN "grafana"."user" as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN "grafana"."user" as updated_user ON dashboard.updated_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 2
AND dashboard.id < 500
AND dashboard.deleted IS NULL
ORDER BY dashboard.id DESC
LIMIT 100

View File

@@ -0,0 +1,34 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard_version.created as updated,
updated_user.uid as updated_by,
dashboard_version.created_by as updated_by_id,
dashboard_version.version,
dashboard_version.message,
dashboard_version.data,
dashboard_version.api_version
FROM "grafana"."dashboard" as dashboard
LEFT OUTER JOIN "grafana"."dashboard_version" as dashboard_version ON dashboard.id = dashboard_version.dashboard_id
LEFT OUTER JOIN "grafana"."dashboard_provisioning" as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN "grafana"."user" as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN "grafana"."user" as updated_user ON dashboard_version.created_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 1
ORDER BY
dashboard_version.created DESC,
dashboard_version.version DESC,
dashboard.uid ASC
LIMIT 50

View File

@@ -0,0 +1,35 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard_version.created as updated,
updated_user.uid as updated_by,
dashboard_version.created_by as updated_by_id,
dashboard_version.version,
dashboard_version.message,
dashboard_version.data,
dashboard_version.api_version
FROM "grafana"."dashboard" as dashboard
LEFT OUTER JOIN "grafana"."dashboard_version" as dashboard_version ON dashboard.id = dashboard_version.dashboard_id
LEFT OUTER JOIN "grafana"."dashboard_provisioning" as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN "grafana"."user" as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN "grafana"."user" as updated_user ON dashboard_version.created_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 2
AND dashboard_version.version < 500
ORDER BY
dashboard_version.created DESC,
dashboard_version.version DESC,
dashboard.uid ASC
LIMIT 100

View File

@@ -0,0 +1,31 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard.updated,
updated_user.uid as updated_by,
dashboard.updated_by as updated_by_id,
dashboard.version,
'' as message,
dashboard.data,
dashboard.api_version
FROM "grafana"."dashboard" as dashboard
LEFT OUTER JOIN "grafana"."dashboard_provisioning" as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN "grafana"."user" as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN "grafana"."user" as updated_user ON dashboard.updated_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 2
AND dashboard.deleted IS NULL
ORDER BY dashboard.id DESC
LIMIT 100

View File

@@ -0,0 +1,32 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard.updated,
updated_user.uid as updated_by,
dashboard.updated_by as updated_by_id,
dashboard.version,
'' as message,
dashboard.data,
dashboard.api_version
FROM "grafana"."dashboard" as dashboard
LEFT OUTER JOIN "grafana"."dashboard_provisioning" as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN "grafana"."user" as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN "grafana"."user" as updated_user ON dashboard.updated_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 2
AND dashboard.id < 500
AND dashboard.deleted IS NULL
ORDER BY dashboard.id DESC
LIMIT 100

View File

@@ -0,0 +1,34 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard_version.created as updated,
updated_user.uid as updated_by,
dashboard_version.created_by as updated_by_id,
dashboard_version.version,
dashboard_version.message,
dashboard_version.data,
dashboard_version.api_version
FROM "grafana"."dashboard" as dashboard
LEFT OUTER JOIN "grafana"."dashboard_version" as dashboard_version ON dashboard.id = dashboard_version.dashboard_id
LEFT OUTER JOIN "grafana"."dashboard_provisioning" as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN "grafana"."user" as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN "grafana"."user" as updated_user ON dashboard_version.created_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 1
ORDER BY
dashboard_version.created DESC,
dashboard_version.version DESC,
dashboard.uid ASC
LIMIT 50

View File

@@ -0,0 +1,35 @@
SELECT
dashboard.org_id,
dashboard.id,
dashboard.uid,
dashboard.title,
dashboard.folder_uid,
dashboard.deleted,
plugin_id,
provisioning.name as repo_name,
provisioning.external_id as repo_path,
provisioning.check_sum as repo_hash,
provisioning.updated as repo_ts,
dashboard.created,
created_user.uid as created_by,
dashboard.created_by as created_by_id,
dashboard_version.created as updated,
updated_user.uid as updated_by,
dashboard_version.created_by as updated_by_id,
dashboard_version.version,
dashboard_version.message,
dashboard_version.data,
dashboard_version.api_version
FROM "grafana"."dashboard" as dashboard
LEFT OUTER JOIN "grafana"."dashboard_version" as dashboard_version ON dashboard.id = dashboard_version.dashboard_id
LEFT OUTER JOIN "grafana"."dashboard_provisioning" as provisioning ON dashboard.id = provisioning.dashboard_id
LEFT OUTER JOIN "grafana"."user" as created_user ON dashboard.created_by = created_user.id
LEFT OUTER JOIN "grafana"."user" as updated_user ON dashboard_version.created_by = updated_user.id
WHERE dashboard.is_folder = FALSE
AND dashboard.org_id = 2
AND dashboard_version.version < 500
ORDER BY
dashboard_version.created DESC,
dashboard_version.version DESC,
dashboard.uid ASC
LIMIT 100

View File

@@ -16,6 +16,11 @@ type DashboardQuery struct {
UID string // to select a single dashboard
Limit int
// MaxRows is used internally by the iterator to fetch data in batches
// When set, the SQL query will include LIMIT MaxRows
// If Limit is smaller, that will be used instead
MaxRows int
// Included in the continue token
// This is the ID from the last dashboard sent in the previous page
LastID int64

View File

@@ -22,7 +22,7 @@ import (
)
func TestDashboardAPIBuilder_Mutate(t *testing.T) {
migration.Initialize(testutil.NewDataSourceProvider(testutil.StandardTestConfig), testutil.NewLibraryElementProvider())
migration.Initialize(testutil.NewDataSourceProvider(testutil.StandardTestConfig), testutil.NewLibraryElementProvider(), migration.DefaultCacheTTL)
tests := []struct {
name string
inputObj runtime.Object

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"maps"
"strconv"
"strings"
"github.com/prometheus/client_golang/prometheus"
@@ -179,13 +180,30 @@ func RegisterAPIService(
datasourceService: datasourceService,
}, &libraryElementIndexProvider{
libraryElementService: libraryPanels,
})
}, cfg.DashboardSchemaMigrationCacheTTL)
// For single-tenant deployments (indicated by StackID), preload the cache in the background
if cfg.StackID != "" {
// Single namespace for cloud stack
stackID, err := strconv.ParseInt(cfg.StackID, 10, 64)
if err == nil {
var nsInfo authlib.NamespaceInfo
nsInfo, err = authlib.ParseNamespace(authlib.CloudNamespaceFormatter(stackID))
if err == nil {
migration.PreloadCacheInBackground([]authlib.NamespaceInfo{nsInfo})
}
}
if err != nil {
logging.DefaultLogger.Error("failed to parse namespace for cache preloading", "stackId", cfg.StackID, "err", err)
}
}
apiregistration.RegisterAPI(builder)
return builder
}
func NewAPIService(ac authlib.AccessClient, features featuremgmt.FeatureToggles, folderClientProvider client.K8sHandlerProvider, datasourceProvider schemaversion.DataSourceIndexProvider, libraryElementProvider schemaversion.LibraryElementIndexProvider, resourcePermissionsSvc *dynamic.NamespaceableResourceInterface) *DashboardsAPIBuilder {
migration.Initialize(datasourceProvider, libraryElementProvider)
migration.Initialize(datasourceProvider, libraryElementProvider, migration.DefaultCacheTTL)
return &DashboardsAPIBuilder{
minRefreshInterval: "10s",
accessClient: ac,

View File

@@ -3,6 +3,7 @@ package dashboard
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
@@ -272,7 +273,8 @@ func (s *SearchHandler) DoSortable(w http.ResponseWriter, r *http.Request) {
const rootFolder = "general"
// nolint:gocyclo
var errEmptyResults = fmt.Errorf("empty results")
func (s *SearchHandler) DoSearch(w http.ResponseWriter, r *http.Request) {
ctx, span := s.tracer.Start(r.Context(), "dashboard.search")
defer span.End()
@@ -289,6 +291,51 @@ func (s *SearchHandler) DoSearch(w http.ResponseWriter, r *http.Request) {
return
}
searchRequest, err := convertHttpSearchRequestToResourceSearchRequest(queryParams, user, func() ([]string, error) {
return s.getDashboardsUIDsSharedWithUser(ctx, user)
})
if err != nil {
if errors.Is(err, errEmptyResults) {
s.write(w, dashboardv0alpha1.SearchResults{
Hits: []dashboardv0alpha1.DashboardHit{},
})
} else {
errhttp.Write(ctx, err, w)
}
return
}
result, err := s.client.Search(ctx, searchRequest)
if err != nil {
errhttp.Write(ctx, err, w)
return
}
if result != nil {
s.log.Debug("search result hits and cost", "total_hits", result.TotalHits, "query_cost", result.QueryCost)
}
parsedResults, err := dashboardsearch.ParseResults(result, searchRequest.Offset)
if err != nil {
errhttp.Write(ctx, err, w)
return
}
if len(searchRequest.SortBy) == 0 {
// default sort by resource descending ( folders then dashboards ) then title
sort.Slice(parsedResults.Hits, func(i, j int) bool {
// Just sorting by resource for now. The rest should be sorted by search score already
return parsedResults.Hits[i].Resource > parsedResults.Hits[j].Resource
})
}
s.write(w, parsedResults)
}
// convertHttpSearchRequestToResourceSearchRequest create ResourceSearchRequest from query parameters.
// Supplied function is used to get dashboards shared with user.
// nolint:gocyclo
func convertHttpSearchRequestToResourceSearchRequest(queryParams url.Values, user identity.Requester, getDashboardsUIDsSharedWithUser func() ([]string, error)) (*resourcepb.ResourceSearchRequest, error) {
// get limit and offset from query params
limit := 50
offset := 0
@@ -339,6 +386,7 @@ func (s *SearchHandler) DoSearch(w http.ResponseWriter, r *http.Request) {
hasDash := len(types) == 0 || slices.Contains(types, "dashboard")
hasFolder := len(types) == 0 || slices.Contains(types, "folder")
// If both types are present, we need to search both dashboards and folders; by default, if nothing is set, we also search both.
var err error
if (hasDash && hasFolder) || (!hasDash && !hasFolder) {
searchRequest.Options.Key, err = asResourceKey(user.GetNamespace(), dashboardv0alpha1.DASHBOARD_RESOURCE)
if err == nil {
@@ -352,8 +400,7 @@ func (s *SearchHandler) DoSearch(w http.ResponseWriter, r *http.Request) {
searchRequest.Options.Key, err = asResourceKey(user.GetNamespace(), dashboardv0alpha1.DASHBOARD_RESOURCE)
}
if err != nil {
errhttp.Write(ctx, err, w)
return
return nil, err
}
// Add sorting
@@ -384,20 +431,20 @@ func (s *SearchHandler) DoSearch(w http.ResponseWriter, r *http.Request) {
// The tags filter
if tags, ok := queryParams["tag"]; ok {
searchRequest.Options.Fields = []*resourcepb.Requirement{{
searchRequest.Options.Fields = append(searchRequest.Options.Fields, &resourcepb.Requirement{
Key: "tags",
Operator: "=",
Values: tags,
}}
})
}
// The libraryPanel filter
if libraryPanel, ok := queryParams["libraryPanel"]; ok {
searchRequest.Options.Fields = []*resourcepb.Requirement{{
searchRequest.Options.Fields = append(searchRequest.Options.Fields, &resourcepb.Requirement{
Key: search.DASHBOARD_LIBRARY_PANEL_REFERENCE,
Operator: "=",
Values: libraryPanel,
}}
})
}
// The names filter
@@ -406,17 +453,13 @@ func (s *SearchHandler) DoSearch(w http.ResponseWriter, r *http.Request) {
// Add the folder constraint. Note this does not do recursive search
folder := queryParams.Get("folder")
if folder == foldermodel.SharedWithMeFolderUID {
dashboardUIDs, err := s.getDashboardsUIDsSharedWithUser(ctx, user)
dashboardUIDs, err := getDashboardsUIDsSharedWithUser()
if err != nil {
errhttp.Write(ctx, err, w)
return
return nil, err
}
if len(dashboardUIDs) == 0 {
s.write(w, dashboardv0alpha1.SearchResults{
Hits: []dashboardv0alpha1.DashboardHit{},
})
return
return nil, errEmptyResults
}
// hijacks the "name" query param to only search for shared dashboard UIDs
names = append(names, dashboardUIDs...)
@@ -424,50 +467,21 @@ func (s *SearchHandler) DoSearch(w http.ResponseWriter, r *http.Request) {
if folder == rootFolder {
folder = "" // root folder is empty in the search index
}
searchRequest.Options.Fields = []*resourcepb.Requirement{{
searchRequest.Options.Fields = append(searchRequest.Options.Fields, &resourcepb.Requirement{
Key: "folder",
Operator: "=",
Values: []string{folder},
}}
}
if len(names) > 0 {
if searchRequest.Options.Fields == nil {
searchRequest.Options.Fields = []*resourcepb.Requirement{}
}
namesFilter := []*resourcepb.Requirement{{
Key: "name",
Operator: "in",
Values: names,
}}
searchRequest.Options.Fields = append(searchRequest.Options.Fields, namesFilter...)
}
result, err := s.client.Search(ctx, searchRequest)
if err != nil {
errhttp.Write(ctx, err, w)
return
}
if result != nil {
s.log.Debug("search result hits and cost", "total_hits", result.TotalHits, "query_cost", result.QueryCost)
}
parsedResults, err := dashboardsearch.ParseResults(result, searchRequest.Offset)
if err != nil {
errhttp.Write(ctx, err, w)
return
}
if len(searchRequest.SortBy) == 0 {
// default sort by resource descending ( folders then dashboards ) then title
sort.Slice(parsedResults.Hits, func(i, j int) bool {
// Just sorting by resource for now. The rest should be sorted by search score already
return parsedResults.Hits[i].Resource > parsedResults.Hits[j].Resource
})
}
s.write(w, parsedResults)
if len(names) > 0 {
searchRequest.Options.Fields = append(searchRequest.Options.Fields, &resourcepb.Requirement{
Key: "name",
Operator: "in",
Values: names,
})
}
return searchRequest, nil
}
func (s *SearchHandler) write(w http.ResponseWriter, obj any) {

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"net/http/httptest"
"net/url"
"testing"
"github.com/stretchr/testify/assert"
@@ -17,6 +18,7 @@ import (
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/dashboards/dashboardaccess"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
@@ -282,93 +284,6 @@ func TestSearchHandlerPagination(t *testing.T) {
}
func TestSearchHandler(t *testing.T) {
t.Run("Multiple comma separated fields will be appended to default dashboard search fields", func(t *testing.T) {
// Create a mock client
mockClient := &MockClient{}
features := featuremgmt.WithFeatures()
// Initialize the search handler with the mock client
searchHandler := SearchHandler{
log: log.New("test", "test"),
client: mockClient,
tracer: tracing.NewNoopTracerService(),
features: features,
}
rr := httptest.NewRecorder()
req := httptest.NewRequest("GET", "/search?field=field1&field=field2&field=field3", nil)
req.Header.Add("content-type", "application/json")
req = req.WithContext(identity.WithRequester(req.Context(), &user.SignedInUser{Namespace: "test"}))
searchHandler.DoSearch(rr, req)
if mockClient.LastSearchRequest == nil {
t.Fatalf("expected Search to be called, but it was not")
}
expectedFields := []string{"title", "folder", "tags", "description", "manager.kind", "manager.id", "field1", "field2", "field3"}
if fmt.Sprintf("%v", mockClient.LastSearchRequest.Fields) != fmt.Sprintf("%v", expectedFields) {
t.Errorf("expected fields %v, got %v", expectedFields, mockClient.LastSearchRequest.Fields)
}
})
t.Run("Single field will be appended to default dashboard search fields", func(t *testing.T) {
// Create a mock client
mockClient := &MockClient{}
features := featuremgmt.WithFeatures()
// Initialize the search handler with the mock client
searchHandler := SearchHandler{
log: log.New("test", "test"),
client: mockClient,
tracer: tracing.NewNoopTracerService(),
features: features,
}
rr := httptest.NewRecorder()
req := httptest.NewRequest("GET", "/search?field=field1", nil)
req.Header.Add("content-type", "application/json")
req = req.WithContext(identity.WithRequester(req.Context(), &user.SignedInUser{Namespace: "test"}))
searchHandler.DoSearch(rr, req)
if mockClient.LastSearchRequest == nil {
t.Fatalf("expected Search to be called, but it was not")
}
expectedFields := []string{"title", "folder", "tags", "description", "manager.kind", "manager.id", "field1"}
if fmt.Sprintf("%v", mockClient.LastSearchRequest.Fields) != fmt.Sprintf("%v", expectedFields) {
t.Errorf("expected fields %v, got %v", expectedFields, mockClient.LastSearchRequest.Fields)
}
})
t.Run("Passing no fields will search using default dashboard fields", func(t *testing.T) {
// Create a mock client
mockClient := &MockClient{}
features := featuremgmt.WithFeatures()
// Initialize the search handler with the mock client
searchHandler := SearchHandler{
log: log.New("test", "test"),
client: mockClient,
tracer: tracing.NewNoopTracerService(),
features: features,
}
rr := httptest.NewRecorder()
req := httptest.NewRequest("GET", "/search", nil)
req.Header.Add("content-type", "application/json")
req = req.WithContext(identity.WithRequester(req.Context(), &user.SignedInUser{Namespace: "test"}))
searchHandler.DoSearch(rr, req)
if mockClient.LastSearchRequest == nil {
t.Fatalf("expected Search to be called, but it was not")
}
expectedFields := []string{"title", "folder", "tags", "description", "manager.kind", "manager.id"}
if fmt.Sprintf("%v", mockClient.LastSearchRequest.Fields) != fmt.Sprintf("%v", expectedFields) {
t.Errorf("expected fields %v, got %v", expectedFields, mockClient.LastSearchRequest.Fields)
}
})
t.Run("Sort - default sort by resource", func(t *testing.T) {
rows := make([]*resourcepb.ResourceTableRow, len(mockResults))
for i, r := range mockResults {
@@ -689,6 +604,408 @@ func TestSearchHandlerSharedDashboards(t *testing.T) {
})
}
func TestConvertHttpSearchRequestToResourceSearchRequest(t *testing.T) {
testUser := &user.SignedInUser{
Namespace: "test-namespace",
OrgID: 1,
}
dashboardKey := &resourcepb.ResourceKey{
Group: "dashboard.grafana.app",
Resource: "dashboards",
Namespace: "test-namespace",
}
folderKey := &resourcepb.ResourceKey{
Group: "folder.grafana.app",
Resource: "folders",
Namespace: "test-namespace",
}
defaultFields := []string{"title", "folder", "tags", "description", "manager.kind", "manager.id"}
tests := map[string]struct {
queryString string
sharedDashboards []string
sharedDashboardsError error
expected *resourcepb.ResourceSearchRequest
expectedError error
}{
"default values with no query params": {
queryString: "",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"custom limit and offset": {
queryString: "limit=100&offset=50",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 100,
Offset: 50,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"pagination with page parameter": {
queryString: "limit=25&page=3",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 25,
Offset: 50,
Page: 3,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"query string and explain": {
queryString: "query=test-query&explain=true",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "test-query",
Limit: 50,
Offset: 0,
Page: 1,
Explain: true,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"additional fields": {
queryString: "field=custom1&field=custom2",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: append(defaultFields, "custom1", "custom2"),
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"view permission": {
queryString: "permission=view",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Permission: int64(dashboardaccess.PERMISSION_VIEW),
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"edit permission": {
queryString: "permission=Edit",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Permission: int64(dashboardaccess.PERMISSION_EDIT),
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"admin permission": {
queryString: "permission=ADMIN",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Permission: int64(dashboardaccess.PERMISSION_ADMIN),
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"type dashboard only": {
queryString: "type=dashboard",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
},
},
"type folder only": {
queryString: "type=folder",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: folderKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
},
},
"both types should include federated": {
queryString: "type=dashboard&type=folder",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"sort ascending": {
queryString: "sort=title",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
SortBy: []*resourcepb.ResourceSearchRequest_Sort{{Field: "title", Desc: false}},
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"sort descending": {
queryString: "sort=-title",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
SortBy: []*resourcepb.ResourceSearchRequest_Sort{{Field: "title", Desc: true}},
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"facet fields": {
queryString: "facet=tags&facet=folder",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{Key: dashboardKey},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Facet: map[string]*resourcepb.ResourceSearchRequest_Facet{
"tags": {Field: "tags", Limit: 50},
"folder": {Field: "folder", Limit: 50},
},
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"tag filter": {
queryString: "tag=tag1&tag=tag2",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
Key: dashboardKey,
Fields: []*resourcepb.Requirement{{Key: "tags", Operator: "=", Values: []string{"tag1", "tag2"}}},
},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"folder filter": {
queryString: "folder=my-folder",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
Key: dashboardKey,
Fields: []*resourcepb.Requirement{{Key: "folder", Operator: "=", Values: []string{"my-folder"}}},
},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"tag and folder filter together": {
queryString: "tag=tag1&tag=tag2&folder=my-folder",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
Key: dashboardKey,
Fields: []*resourcepb.Requirement{
{Key: "tags", Operator: "=", Values: []string{"tag1", "tag2"}},
{Key: "folder", Operator: "=", Values: []string{"my-folder"}},
},
},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"root folder should be converted to empty string": {
queryString: "folder=general",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
Key: dashboardKey,
Fields: []*resourcepb.Requirement{{Key: "folder", Operator: "=", Values: []string{""}}},
},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"shared with me folder with dashboards": {
queryString: "folder=sharedwithme",
sharedDashboards: []string{"dash1", "dash2", "dash3"},
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
Key: dashboardKey,
Fields: []*resourcepb.Requirement{{Key: "name", Operator: "in", Values: []string{"dash1", "dash2", "dash3"}}},
},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"shared with me folder without dashboards returns error": {
queryString: "folder=sharedwithme",
sharedDashboards: []string{},
expectedError: errEmptyResults,
},
"name filter": {
queryString: "name=name1&name=name2",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
Key: dashboardKey,
Fields: []*resourcepb.Requirement{{Key: "name", Operator: "in", Values: []string{"name1", "name2"}}},
},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"comprehensive filter with query, tags, folder, and name": {
queryString: "query=search-term&tag=monitoring&tag=prod&folder=my-folder&name=dash1&name=dash2",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
Key: dashboardKey,
Fields: []*resourcepb.Requirement{
{Key: "tags", Operator: "=", Values: []string{"monitoring", "prod"}},
{Key: "folder", Operator: "=", Values: []string{"my-folder"}},
{Key: "name", Operator: "in", Values: []string{"dash1", "dash2"}},
},
},
Query: "search-term",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"libraryPanel filter": {
queryString: "libraryPanel=panel1&libraryPanel=panel2",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
Key: dashboardKey,
Fields: []*resourcepb.Requirement{{Key: "reference.LibraryPanel", Operator: "=", Values: []string{"panel1", "panel2"}}},
},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
"libraryPanel and tag filter together": {
queryString: "libraryPanel=panel1&tag=monitoring&tag=prod",
expected: &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
Key: dashboardKey,
Fields: []*resourcepb.Requirement{
{Key: "tags", Operator: "=", Values: []string{"monitoring", "prod"}},
{Key: "reference.LibraryPanel", Operator: "=", Values: []string{"panel1"}},
},
},
Query: "",
Limit: 50,
Offset: 0,
Page: 1,
Explain: false,
Fields: defaultFields,
Federated: []*resourcepb.ResourceKey{folderKey},
},
},
}
for name, tt := range tests {
t.Run(name, func(t *testing.T) {
queryParams, err := url.ParseQuery(tt.queryString)
require.NoError(t, err)
getDashboardsFunc := func() ([]string, error) {
if tt.sharedDashboardsError != nil {
return nil, tt.sharedDashboardsError
}
return tt.sharedDashboards, nil
}
result, err := convertHttpSearchRequestToResourceSearchRequest(queryParams, testUser, getDashboardsFunc)
if tt.expectedError != nil {
assert.ErrorIs(t, err, tt.expectedError)
return
}
require.NoError(t, err)
require.NotNil(t, result)
assert.Equal(t, tt.expected, result)
})
}
}
// MockClient implements the ResourceIndexClient interface for testing
type MockClient struct {
resourcepb.ResourceIndexClient

View File

@@ -657,7 +657,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
azurePromMigrationService := promtypemigration.ProvideAzurePromMigrationService(service15, inMemory, repoManager, pluginInstaller, cfg)
amazonPromMigrationService := promtypemigration.ProvideAmazonPromMigrationService(service15, inMemory, repoManager, pluginInstaller, cfg)
promTypeMigrationProviderImpl := promtypemigration.ProvidePromTypeMigrationProvider(serverLockService, featureToggles, azurePromMigrationService, amazonPromMigrationService)
provisioningServiceImpl, err := provisioning.ProvideService(accessControl, cfg, sqlStore, pluginstoreService, dBstore, serviceService, notificationService, dashboardProvisioningService, service15, correlationsService, dashboardService, folderimplService, service13, searchService, quotaService, secretsService, orgService, receiverPermissionsService, tracingService, dualwriteService, promTypeMigrationProviderImpl)
provisioningServiceImpl, err := provisioning.ProvideService(accessControl, cfg, sqlStore, pluginstoreService, dBstore, serviceService, notificationService, dashboardProvisioningService, service15, correlationsService, dashboardService, folderimplService, service13, searchService, quotaService, secretsService, orgService, receiverPermissionsService, tracingService, dualwriteService, promTypeMigrationProviderImpl, serverLockService)
if err != nil {
return nil, err
}
@@ -1294,7 +1294,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
azurePromMigrationService := promtypemigration.ProvideAzurePromMigrationService(service15, inMemory, repoManager, pluginInstaller, cfg)
amazonPromMigrationService := promtypemigration.ProvideAmazonPromMigrationService(service15, inMemory, repoManager, pluginInstaller, cfg)
promTypeMigrationProviderImpl := promtypemigration.ProvidePromTypeMigrationProvider(serverLockService, featureToggles, azurePromMigrationService, amazonPromMigrationService)
provisioningServiceImpl, err := provisioning.ProvideService(accessControl, cfg, sqlStore, pluginstoreService, dBstore, serviceService, notificationService, dashboardProvisioningService, service15, correlationsService, dashboardService, folderimplService, service13, searchService, quotaService, secretsService, orgService, receiverPermissionsService, tracingService, dualwriteService, promTypeMigrationProviderImpl)
provisioningServiceImpl, err := provisioning.ProvideService(accessControl, cfg, sqlStore, pluginstoreService, dBstore, serviceService, notificationService, dashboardProvisioningService, service15, correlationsService, dashboardService, folderimplService, service13, searchService, quotaService, secretsService, orgService, receiverPermissionsService, tracingService, dualwriteService, promTypeMigrationProviderImpl, serverLockService)
if err != nil {
return nil, err
}

View File

@@ -373,23 +373,10 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
ExpectIssuer: "http://localhost:3000",
},
},
want: &authn.Identity{
ID: "this-uid",
UID: "this-uid",
Name: "this-uid",
Type: claims.TypeAccessPolicy,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaimsWithDeprecatedStackClaimSet,
Namespace: "stack-1234",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",
ClientParams: authn.ClientParams{
SyncPermissions: true,
},
},
wantErr: errExtJWTDisallowedNamespaceClaim,
},
{
name: "should authenticate as user using specific deprecated namespace claim in access and id tokens",
name: "should NOT authenticate as user using specific deprecated namespace claim in access and id tokens",
accessToken: &validAccessTokenClaimsWithDeprecatedStackClaimSet,
idToken: &validIDTokenClaimsWithDeprecatedStackClaimSet,
orgID: 1,
@@ -401,20 +388,7 @@ func TestExtendedJWT_Authenticate(t *testing.T) {
ExpectIssuer: "http://localhost:3000",
},
},
want: &authn.Identity{
ID: "2",
Type: claims.TypeUser,
OrgID: 1,
AccessTokenClaims: &validAccessTokenClaimsWithDeprecatedStackClaimSet,
IDTokenClaims: &validIDTokenClaimsWithDeprecatedStackClaimSet,
Namespace: "stack-1234",
AuthenticatedBy: "extendedjwt",
AuthID: "access-policy:this-uid",
ClientParams: authn.ClientParams{
SyncPermissions: true,
FetchSyncedUser: true,
},
},
wantErr: errExtJWTDisallowedNamespaceClaim,
},
{
name: "should authenticate as user using wildcard namespace for access token, setting allowed namespace to specific",

View File

@@ -2009,7 +2009,14 @@ func (dr *DashboardServiceImpl) searchDashboardsThroughK8sRaw(ctx context.Contex
request.Limit = query.Limit
request.Page = query.Page
request.Offset = (query.Page - 1) * query.Limit // only relevant when running in modes 3+
request.Fields = dashboardsearch.IncludeFields
request.Fields = append(
dashboardsearch.IncludeFields,
// Include the dashboard legacy ID in the results, as it is needed when
// determining whether a provisioned dashboard exists or not, see
// `(*DashboardServiceImpl).searchProvisionedDashboardsThroughK8s`.
resource.SEARCH_FIELD_LEGACY_ID,
resource.SEARCH_FIELD_LABELS+"."+resource.SEARCH_FIELD_LEGACY_ID,
)
namespace := dr.k8sclient.GetNamespace(query.OrgId)
var err error

View File

@@ -2016,6 +2016,26 @@ func TestSearchDashboardsThroughK8sRaw(t *testing.T) {
_, err := service.searchDashboardsThroughK8s(ctx, query)
require.NoError(t, err)
})
t.Run("search will request legacy dashboard ID", func(t *testing.T) {
ctx := context.Background()
k8sCliMock := new(client.MockK8sHandler)
service := &DashboardServiceImpl{k8sclient: k8sCliMock}
query := &dashboards.FindPersistedDashboardsQuery{
ManagedBy: utils.ManagerKindClassicFP, //nolint:staticcheck
OrgId: 1,
}
k8sCliMock.On("GetNamespace", mock.Anything, mock.Anything).Return("default")
k8sCliMock.On("Search", mock.Anything, mock.Anything, mock.MatchedBy(func(req *resourcepb.ResourceSearchRequest) bool {
return slices.Contains(req.Fields, "grafana.app/deprecatedInternalID") &&
slices.Contains(req.Fields, "labels.grafana.app/deprecatedInternalID")
})).Return(&resourcepb.ResourceSearchResponse{
Results: &resourcepb.ResourceTable{},
TotalHits: 0,
}, nil)
_, err := service.searchDashboardsThroughK8s(ctx, query)
require.NoError(t, err)
})
}
func TestSearchProvisionedDashboardsThroughK8sRaw(t *testing.T) {

View File

@@ -2,6 +2,7 @@ package dashboards
import (
"context"
"errors"
"fmt"
"os"
"time"
@@ -9,10 +10,12 @@ import (
dashboardV1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
folderV1 "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/serverlock"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/folder"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/provisioning/utils"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/legacysql/dualwrite"
)
@@ -28,7 +31,7 @@ type DashboardProvisioner interface {
}
// DashboardProvisionerFactory creates DashboardProvisioners based on input
type DashboardProvisionerFactory func(context.Context, string, dashboards.DashboardProvisioningService, org.Service, utils.DashboardStore, folder.Service, dualwrite.Service) (DashboardProvisioner, error)
type DashboardProvisionerFactory func(context.Context, string, dashboards.DashboardProvisioningService, *setting.Cfg, org.Service, utils.DashboardStore, folder.Service, dualwrite.Service, *serverlock.ServerLockService) (DashboardProvisioner, error)
// Provisioner is responsible for syncing dashboard from disk to Grafana's database.
type Provisioner struct {
@@ -38,6 +41,8 @@ type Provisioner struct {
duplicateValidator duplicateValidator
provisioner dashboards.DashboardProvisioningService
dual dualwrite.Service
serverLock *serverlock.ServerLockService
cfg *setting.Cfg
}
func (provider *Provisioner) HasDashboardSources() bool {
@@ -45,7 +50,7 @@ func (provider *Provisioner) HasDashboardSources() bool {
}
// New returns a new DashboardProvisioner
func New(ctx context.Context, configDirectory string, provisioner dashboards.DashboardProvisioningService, orgService org.Service, dashboardStore utils.DashboardStore, folderService folder.Service, dual dualwrite.Service) (DashboardProvisioner, error) {
func New(ctx context.Context, configDirectory string, provisioner dashboards.DashboardProvisioningService, cfg *setting.Cfg, orgService org.Service, dashboardStore utils.DashboardStore, folderService folder.Service, dual dualwrite.Service, serverLockService *serverlock.ServerLockService) (DashboardProvisioner, error) {
logger := log.New("provisioning.dashboard")
cfgReader := &configReader{path: configDirectory, log: logger, orgExists: utils.NewOrgExistsChecker(orgService)}
configs, err := cfgReader.readConfig(ctx)
@@ -78,6 +83,8 @@ func New(ctx context.Context, configDirectory string, provisioner dashboards.Das
duplicateValidator: newDuplicateValidator(logger, fileReaders),
provisioner: provisioner,
dual: dual,
serverLock: serverLockService,
cfg: cfg,
}
return d, nil
@@ -95,23 +102,53 @@ func (provider *Provisioner) Provision(ctx context.Context) error {
}
}
provider.log.Info("starting to provision dashboards")
var errProvisioning error
for _, reader := range provider.fileReaders {
if err := reader.walkDisk(ctx); err != nil {
if os.IsNotExist(err) {
// don't stop the provisioning service in case the folder is missing. The folder can appear after the startup
provider.log.Warn("Failed to provision config", "name", reader.Cfg.Name, "error", err)
return nil
}
return fmt.Errorf("failed to provision config %v: %w", reader.Cfg.Name, err)
// retry obtaining the lock for 20 attempts
retryOpt := func(attempts int) error {
if attempts < 20 {
return nil
}
return errors.New("retries exhausted")
}
provider.duplicateValidator.validate()
provider.log.Info("finished to provision dashboards")
return nil
lockTimeConfig := serverlock.LockTimeConfig{
// if a replica crashes while holding the lock, other replicas can obtain the
// lock after this duration (15s default value, might be configured via config file)
MaxInterval: time.Duration(provider.cfg.ClassicProvisioningDashboardsServerLockMaxIntervalSeconds) * time.Second,
// wait beetween 100ms and 1s before retrying to obtain the lock (default values, might be configured via config file)
MinWait: time.Duration(provider.cfg.ClassicProvisioningDashboardsServerLockMinWaitMs) * time.Millisecond,
MaxWait: time.Duration(provider.cfg.ClassicProvisioningDashboardsServerLockMaxWaitMs) * time.Millisecond,
}
// this means that if we fail to obtain the lock after ~10 seconds, we return an error
lockErr := provider.serverLock.LockExecuteAndReleaseWithRetries(ctx, "provisioning_dashboards", lockTimeConfig, func(ctx context.Context) {
provider.log.Info("starting to provision dashboards")
for _, reader := range provider.fileReaders {
if err := reader.walkDisk(ctx); err != nil {
if os.IsNotExist(err) {
// don't stop the provisioning service in case the folder is missing. The folder can appear after the startup
provider.log.Warn("Failed to provision config", "name", reader.Cfg.Name, "error", err)
return
}
errProvisioning = fmt.Errorf("failed to provision config %v: %w", reader.Cfg.Name, err)
return
}
}
provider.duplicateValidator.validate()
provider.log.Info("finished to provision dashboards")
}, retryOpt)
if lockErr != nil {
provider.log.Error("Failed to obtain dashboard provisioning lock", "error", lockErr)
return lockErr
}
return errProvisioning
}
// CleanUpOrphanedDashboards deletes provisioned dashboards missing a linked reader.

View File

@@ -10,6 +10,7 @@ import (
"github.com/grafana/dskit/services"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/serverlock"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/accesscontrol"
@@ -64,6 +65,7 @@ func ProvideService(
tracer tracing.Tracer,
dual dualwrite.Service,
promTypeMigrationProvider promtypemigration.PromTypeMigrationProvider,
serverLockService *serverlock.ServerLockService,
) (*ProvisioningServiceImpl, error) {
s := &ProvisioningServiceImpl{
Cfg: cfg,
@@ -92,6 +94,7 @@ func ProvideService(
tracer: tracer,
migratePrometheusType: promTypeMigrationProvider.Run,
dual: dual,
serverLock: serverLockService,
}
s.NamedService = services.NewBasicService(s.starting, s.running, nil).WithName(ServiceName)
@@ -166,7 +169,7 @@ func (ps *ProvisioningServiceImpl) running(ctx context.Context) error {
func (ps *ProvisioningServiceImpl) setDashboardProvisioner() error {
dashboardPath := filepath.Join(ps.Cfg.ProvisioningPath, "dashboards")
dashProvisioner, err := ps.newDashboardProvisioner(context.Background(), dashboardPath, ps.dashboardProvisioningService, ps.orgService, ps.dashboardService, ps.folderService, ps.dual)
dashProvisioner, err := ps.newDashboardProvisioner(context.Background(), dashboardPath, ps.dashboardProvisioningService, ps.Cfg, ps.orgService, ps.dashboardService, ps.folderService, ps.dual, ps.serverLock)
if err != nil {
return fmt.Errorf("%v: %w", "Failed to create provisioner", err)
}
@@ -242,6 +245,7 @@ type ProvisioningServiceImpl struct {
resourcePermissions accesscontrol.ReceiverPermissionsService
tracer tracing.Tracer
dual dualwrite.Service
serverLock *serverlock.ServerLockService
migratePrometheusType func(context.Context) error
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/infra/serverlock"
dashboardstore "github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/folder"
"github.com/grafana/grafana/pkg/services/org"
@@ -20,6 +21,7 @@ import (
"github.com/grafana/grafana/pkg/services/provisioning/datasources"
"github.com/grafana/grafana/pkg/services/provisioning/utils"
"github.com/grafana/grafana/pkg/services/searchV2"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/legacysql/dualwrite"
)
@@ -160,7 +162,7 @@ func setup(t *testing.T) *serviceTestStruct {
searchStub := searchV2.NewStubSearchService()
service, err := newProvisioningServiceImpl(
func(context.Context, string, dashboardstore.DashboardProvisioningService, org.Service, utils.DashboardStore, folder.Service, dualwrite.Service) (dashboards.DashboardProvisioner, error) {
func(context.Context, string, dashboardstore.DashboardProvisioningService, *setting.Cfg, org.Service, utils.DashboardStore, folder.Service, dualwrite.Service, *serverlock.ServerLockService) (dashboards.DashboardProvisioner, error) {
serviceTest.dashboardProvisionerInstantiations++
return serviceTest.mock, nil
},

View File

@@ -150,6 +150,11 @@ type Cfg struct {
PluginsPath string
EnterpriseLicensePath string
// Classic Provisioning settings
ClassicProvisioningDashboardsServerLockMaxIntervalSeconds int64
ClassicProvisioningDashboardsServerLockMinWaitMs int64
ClassicProvisioningDashboardsServerLockMaxWaitMs int64
// SMTP email settings
Smtp SmtpSettings
@@ -242,11 +247,12 @@ type Cfg struct {
MetricsGrafanaEnvironmentInfo map[string]string
// Dashboards
DashboardVersionsToKeep int
MinRefreshInterval string
DefaultHomeDashboardPath string
DashboardPerformanceMetrics []string
PanelSeriesLimit int
DashboardVersionsToKeep int
MinRefreshInterval string
DefaultHomeDashboardPath string
DashboardPerformanceMetrics []string
PanelSeriesLimit int
DashboardSchemaMigrationCacheTTL time.Duration
// Auth
LoginCookieName string
@@ -1216,6 +1222,8 @@ func (cfg *Cfg) parseINIFile(iniFile *ini.File) error {
return err
}
cfg.readClassicProvisioningSettings(iniFile)
// read dashboard settings
dashboards := iniFile.Section("dashboards")
cfg.DashboardVersionsToKeep = dashboards.Key("versions_to_keep").MustInt(20)
@@ -1223,6 +1231,7 @@ func (cfg *Cfg) parseINIFile(iniFile *ini.File) error {
cfg.DefaultHomeDashboardPath = dashboards.Key("default_home_dashboard_path").MustString("")
cfg.DashboardPerformanceMetrics = util.SplitString(dashboards.Key("dashboard_performance_metrics").MustString(""))
cfg.PanelSeriesLimit = dashboards.Key("panel_series_limit").MustInt(0)
cfg.DashboardSchemaMigrationCacheTTL = dashboards.Key("schema_migration_cache_ttl").MustDuration(time.Minute)
if err := readUserSettings(iniFile, cfg); err != nil {
return err
@@ -2097,6 +2106,12 @@ func (cfg *Cfg) readLiveSettings(iniFile *ini.File) error {
return nil
}
func (cfg *Cfg) readClassicProvisioningSettings(iniFile *ini.File) {
cfg.ClassicProvisioningDashboardsServerLockMinWaitMs = iniFile.Section("classic_provisioning").Key("dashboards_server_lock_min_wait_ms").MustInt64(100)
cfg.ClassicProvisioningDashboardsServerLockMaxWaitMs = iniFile.Section("classic_provisioning").Key("dashboards_server_lock_max_wait_ms").MustInt64(1000)
cfg.ClassicProvisioningDashboardsServerLockMaxIntervalSeconds = iniFile.Section("classic_provisioning").Key("dashboards_server_lock_max_interval_seconds").MustInt64(15)
}
func (cfg *Cfg) readProvisioningSettings(iniFile *ini.File) error {
provisioning := valueAsString(iniFile.Section("paths"), "provisioning", "")
cfg.ProvisioningPath = makeAbsolute(provisioning, cfg.HomePath)

View File

@@ -54,6 +54,9 @@ func NewRESTOptionsGetterMemory(originalStorageConfig storagebackend.Config, sec
// Create BadgerDB with in-memory mode
db, err := badger.Open(badger.DefaultOptions("").
WithInMemory(true).
WithMemTableSize(256 << 10). // 256KB memtable size
WithValueThreshold(16 << 10). // 16KB threshold for storing values in LSM vs value log
WithNumMemtables(2). // Keep only 2 memtables in memory
WithLogger(nil))
if err != nil {
return nil, err

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"github.com/grafana/dskit/backoff"
"github.com/grafana/grafana-app-sdk/logging"
gocache "github.com/patrickmn/go-cache"
@@ -13,8 +14,8 @@ import (
const (
defaultLookbackPeriod = 30 * time.Second
defaultPollInterval = 100 * time.Millisecond
defaultEventCacheSize = 10000
defaultMinBackoff = 100 * time.Millisecond
defaultMaxBackoff = 5 * time.Second
defaultBufferSize = 10000
)
@@ -29,15 +30,17 @@ type notifierOptions struct {
type watchOptions struct {
LookbackPeriod time.Duration // How far back to look for events
PollInterval time.Duration // How often to poll for new events
BufferSize int // How many events to buffer
MinBackoff time.Duration // Minimum interval between polling requests
MaxBackoff time.Duration // Maximum interval between polling requests
}
// defaultWatchOptions returns the baseline watchOptions used when a caller
// does not override them; every field is populated from the package-level
// default* constants.
// NOTE(review): this span comes from a diff view and may interleave removed
// and added lines (e.g. PollInterval/BufferSize vs. Min/MaxBackoff) — verify
// the final field set against the merged source before relying on it.
func defaultWatchOptions() watchOptions {
return watchOptions{
LookbackPeriod: defaultLookbackPeriod,
PollInterval: defaultPollInterval,
BufferSize: defaultBufferSize,
MinBackoff: defaultMinBackoff,
MaxBackoff: defaultMaxBackoff,
}
}
@@ -62,9 +65,13 @@ func (n *notifier) cacheKey(evt Event) string {
}
func (n *notifier) Watch(ctx context.Context, opts watchOptions) <-chan Event {
if opts.PollInterval <= 0 {
opts.PollInterval = defaultPollInterval
if opts.MinBackoff <= 0 {
opts.MinBackoff = defaultMinBackoff
}
if opts.MaxBackoff <= 0 || opts.MaxBackoff <= opts.MinBackoff {
opts.MaxBackoff = defaultMaxBackoff
}
cacheTTL := opts.LookbackPeriod
cacheCleanupInterval := 2 * opts.LookbackPeriod
@@ -81,11 +88,21 @@ func (n *notifier) Watch(ctx context.Context, opts watchOptions) <-chan Event {
go func() {
defer close(events)
// Initialize backoff with minimum backoff interval
currentInterval := opts.MinBackoff
backoffConfig := backoff.Config{
MinBackoff: opts.MinBackoff,
MaxBackoff: opts.MaxBackoff,
MaxRetries: 0, // infinite retries
}
bo := backoff.New(ctx, backoffConfig)
for {
select {
case <-ctx.Done():
return
case <-time.After(opts.PollInterval):
case <-time.After(currentInterval):
foundEvents := false
for evt, err := range n.eventStore.ListSince(ctx, subtractDurationFromSnowflake(lastRV, opts.LookbackPeriod)) {
if err != nil {
n.log.Error("Failed to list events since", "error", err)
@@ -102,6 +119,7 @@ func (n *notifier) Watch(ctx context.Context, opts watchOptions) <-chan Event {
continue
}
foundEvents = true
if evt.ResourceVersion > lastRV {
lastRV = evt.ResourceVersion + 1
}
@@ -113,6 +131,14 @@ func (n *notifier) Watch(ctx context.Context, opts watchOptions) <-chan Event {
return
}
}
// Apply backoff logic: reset to min when events are found, increase when no events
if foundEvents {
bo.Reset()
currentInterval = opts.MinBackoff
} else {
currentInterval = bo.NextDelay()
}
}
}
}()

View File

@@ -32,7 +32,6 @@ func TestDefaultWatchOptions(t *testing.T) {
opts := defaultWatchOptions()
assert.Equal(t, defaultLookbackPeriod, opts.LookbackPeriod)
assert.Equal(t, defaultPollInterval, opts.PollInterval)
assert.Equal(t, defaultBufferSize, opts.BufferSize)
}
@@ -158,8 +157,9 @@ func TestNotifier_Watch_NoEvents(t *testing.T) {
opts := watchOptions{
LookbackPeriod: 100 * time.Millisecond,
PollInterval: 50 * time.Millisecond,
BufferSize: 10,
MinBackoff: 50 * time.Millisecond,
MaxBackoff: 500 * time.Millisecond,
}
events := notifier.Watch(ctx, opts)
@@ -210,8 +210,9 @@ func TestNotifier_Watch_WithExistingEvents(t *testing.T) {
opts := watchOptions{
LookbackPeriod: 100 * time.Millisecond,
PollInterval: 50 * time.Millisecond,
BufferSize: 10,
MinBackoff: 50 * time.Millisecond,
MaxBackoff: 500 * time.Millisecond,
}
// Start watching
@@ -265,8 +266,9 @@ func TestNotifier_Watch_EventDeduplication(t *testing.T) {
opts := watchOptions{
LookbackPeriod: time.Second,
PollInterval: 20 * time.Millisecond,
BufferSize: 10,
MinBackoff: 20 * time.Millisecond,
MaxBackoff: 200 * time.Millisecond,
}
// Start watching
@@ -326,8 +328,9 @@ func TestNotifier_Watch_ContextCancellation(t *testing.T) {
opts := watchOptions{
LookbackPeriod: 100 * time.Millisecond,
PollInterval: 20 * time.Millisecond,
BufferSize: 10,
MinBackoff: 20 * time.Millisecond,
MaxBackoff: 200 * time.Millisecond,
}
events := notifier.Watch(ctx, opts)
@@ -369,8 +372,9 @@ func TestNotifier_Watch_MultipleEvents(t *testing.T) {
opts := watchOptions{
LookbackPeriod: time.Second,
PollInterval: 20 * time.Millisecond,
BufferSize: 10,
MinBackoff: 20 * time.Millisecond,
MaxBackoff: 200 * time.Millisecond,
}
// Start watching

View File

@@ -1057,10 +1057,6 @@ func (s *server) List(ctx context.Context, req *resourcepb.ListRequest) (*resour
return err
}
item := &resourcepb.ResourceWrapper{
ResourceVersion: iter.ResourceVersion(),
Value: iter.Value(),
}
// Trash is only accessible to admins or the user who deleted the object
if req.Source == resourcepb.ListRequest_TRASH {
if !s.isTrashItemAuthorized(ctx, iter, trashChecker) {
@@ -1070,6 +1066,11 @@ func (s *server) List(ctx context.Context, req *resourcepb.ListRequest) (*resour
continue
}
item := &resourcepb.ResourceWrapper{
ResourceVersion: iter.ResourceVersion(),
Value: iter.Value(),
}
pageBytes += len(item.Value)
rsp.Items = append(rsp.Items, item)
if (req.Limit > 0 && len(rsp.Items) >= int(req.Limit)) || pageBytes >= maxPageBytes {

View File

@@ -293,6 +293,197 @@ func TestIntegrationLegacySupport(t *testing.T) {
require.Equal(t, 406, rsp.Response.StatusCode) // not acceptable
}
// TestIntegrationListPagination verifies list pagination for the dashboards
// resource across dual-writer modes 1-3. For each mode it checks: an empty
// list, an unlimited list returning all created items, a limited list walked
// via continue tokens with no duplicates, and pagination of a single
// dashboard's version history via the get-history label selector.
func TestIntegrationListPagination(t *testing.T) {
testutil.SkipIntegrationTestInShortMode(t)
gvr := schema.GroupVersionResource{
Group: dashboardV0.GROUP,
Version: dashboardV0.VERSION,
Resource: "dashboards",
}
// Test on modes with legacy
modes := []rest.DualWriterMode{rest.Mode1, rest.Mode2, rest.Mode3}
for _, mode := range modes {
t.Run(fmt.Sprintf("pagination with dual writer mode %d", mode), func(t *testing.T) {
helper := apis.NewK8sTestHelper(t, testinfra.GrafanaOpts{
DisableAnonymous: true,
UnifiedStorageConfig: map[string]setting.UnifiedStorageConfig{
"dashboards.dashboard.grafana.app": {
DualWriterMode: mode,
},
},
})
t.Cleanup(helper.Shutdown)
ctx := context.Background()
client := helper.GetResourceClient(apis.ResourceClientArgs{
User: helper.Org1.Admin,
GVR: gvr,
})
// Test 1: List with no dashboards
rsp, err := client.Resource.List(ctx, metav1.ListOptions{})
require.NoError(t, err)
require.Len(t, rsp.Items, 0)
// Create 5 dashboards to test pagination with small limits
const totalDashboards = 5
createdNames := make([]string, 0, totalDashboards)
for i := 0; i < totalDashboards; i++ {
obj := &unstructured.Unstructured{
Object: map[string]interface{}{
"spec": map[string]any{
"title": fmt.Sprintf("Pagination test dashboard %d", i),
"schemaVersion": 42,
},
},
}
obj.SetGenerateName("pag-")
obj.SetAPIVersion(gvr.GroupVersion().String())
obj.SetKind("Dashboard")
created, err := client.Resource.Create(ctx, obj, metav1.CreateOptions{})
require.NoError(t, err)
createdNames = append(createdNames, created.GetName())
}
// Test 2: List all without limit - should return all dashboards
rsp, err = client.Resource.List(ctx, metav1.ListOptions{})
require.NoError(t, err)
require.Len(t, rsp.Items, totalDashboards, "should return all %d dashboards", totalDashboards)
// Test 3: List with small limit (2) - should paginate
const pageSize = 2
allNames := make(map[string]bool)
continueToken := ""
pageCount := 0
for {
pageCount++
listOpts := metav1.ListOptions{
Limit: pageSize,
Continue: continueToken,
}
rsp, err = client.Resource.List(ctx, listOpts)
require.NoError(t, err)
// Collect names from this page; a repeated name means the continue
// token re-served an item from an earlier page.
for _, item := range rsp.Items {
name := item.GetName()
require.False(t, allNames[name], "duplicate item %s found across pages", name)
allNames[name] = true
}
// Check if there's more pages
continueToken = rsp.GetContinue()
if continueToken == "" {
break
}
// Safety check to prevent infinite loops
require.Less(t, pageCount, 5)
}
// Verify we got all dashboards across all pages
require.Len(t, allNames, totalDashboards, "should have collected all %d dashboards across pages", totalDashboards)
// Verify all created dashboards were found
for _, name := range createdNames {
require.True(t, allNames[name], "dashboard %s not found in paginated results", name)
}
})
t.Run(fmt.Sprintf("history pagination with dual writer mode %d", mode), func(t *testing.T) {
helper := apis.NewK8sTestHelper(t, testinfra.GrafanaOpts{
DisableAnonymous: true,
UnifiedStorageConfig: map[string]setting.UnifiedStorageConfig{
"dashboards.dashboard.grafana.app": {
DualWriterMode: mode,
},
},
})
t.Cleanup(helper.Shutdown)
ctx := context.Background()
client := helper.GetResourceClient(apis.ResourceClientArgs{
User: helper.Org1.Admin,
GVR: gvr,
})
// Create a dashboard
obj := &unstructured.Unstructured{
Object: map[string]interface{}{
"spec": map[string]any{
"title": "History pagination test dashboard",
"schemaVersion": 42,
},
},
}
obj.SetGenerateName("hist-")
obj.SetAPIVersion(gvr.GroupVersion().String())
obj.SetKind("Dashboard")
created, err := client.Resource.Create(ctx, obj, metav1.CreateOptions{})
require.NoError(t, err)
dashName := created.GetName()
// Update the dashboard multiple times to create history entries
// (each update produces one more stored version, for totalVersions total).
const totalVersions = 5
for i := 1; i < totalVersions; i++ {
// Get latest version
current, err := client.Resource.Get(ctx, dashName, metav1.GetOptions{})
require.NoError(t, err)
// Update title
spec := current.Object["spec"].(map[string]interface{})
spec["title"] = fmt.Sprintf("History pagination test dashboard v%d", i+1)
current.Object["spec"] = spec
_, err = client.Resource.Update(ctx, current, metav1.UpdateOptions{})
require.NoError(t, err)
}
// Test: List history with pagination
labelSelector := utils.LabelKeyGetHistory + "=true"
fieldSelector := "metadata.name=" + dashName
const pageSize int64 = 2
allVersions := make([]string, 0)
continueToken := ""
pageCount := 0
for {
pageCount++
listOpts := metav1.ListOptions{
LabelSelector: labelSelector,
FieldSelector: fieldSelector,
Limit: pageSize,
Continue: continueToken,
}
rsp, err := client.Resource.List(ctx, listOpts)
require.NoError(t, err)
// Collect resource versions from this page
for _, item := range rsp.Items {
rv := item.GetResourceVersion()
allVersions = append(allVersions, rv)
}
// Check if there's more pages
continueToken = rsp.GetContinue()
if continueToken == "" {
break
}
// Safety check to prevent infinite loops
require.Less(t, pageCount, 5)
}
// Verify we got all history versions
require.Len(t, allVersions, totalVersions, "should have collected all %d history versions across pages", totalVersions)
})
}
}
func TestIntegrationSearchTypeFiltering(t *testing.T) {
testutil.SkipIntegrationTestInShortMode(t)