mirror of
https://github.com/grafana/grafana.git
synced 2026-01-14 21:25:50 +00:00
Compare commits
22 Commits
mtff/enume
...
feature/ex
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b0bb71f834 | ||
|
|
a7aa55f908 | ||
|
|
0a61846b5e | ||
|
|
785cc739ee | ||
|
|
4913baaf04 | ||
|
|
c8f1efe7c7 | ||
|
|
b91ca14f48 | ||
|
|
2aedbdb76f | ||
|
|
0d7f46c08a | ||
|
|
1b52718c23 | ||
|
|
e61e406440 | ||
|
|
5cb4c311dc | ||
|
|
586410d8b5 | ||
|
|
a0e894c6d8 | ||
|
|
e4796b1de3 | ||
|
|
86a3aae204 | ||
|
|
e0ad4eb7ed | ||
|
|
f0c95a0a10 | ||
|
|
0b46123300 | ||
|
|
81b868ae91 | ||
|
|
2a6a48ac39 | ||
|
|
f581a5a69b |
@@ -25,7 +25,7 @@ Plugin signature verification, also known as _signing_, is a security measure to
|
||||
|
||||
Learn more at [plugin policies](https://grafana.com/legal/plugins/).
|
||||
|
||||
## How does verifiction work?
|
||||
## How does verification work?
|
||||
|
||||
At startup, Grafana verifies the signatures of every plugin in the plugin directory.
|
||||
|
||||
|
||||
@@ -66,7 +66,6 @@ Most [generally available](https://grafana.com/docs/release-life-cycle/#general-
|
||||
| `sharingDashboardImage` | Enables image sharing functionality for dashboards | Yes |
|
||||
| `tabularNumbers` | Use fixed-width numbers globally in the UI | |
|
||||
| `azureResourcePickerUpdates` | Enables the updated Azure Monitor resource picker | Yes |
|
||||
| `tempoSearchBackendMigration` | Run search queries through the tempo backend | |
|
||||
| `opentsdbBackendMigration` | Run queries through the data source backend | |
|
||||
|
||||
## Public preview feature toggles
|
||||
|
||||
@@ -1021,11 +1021,6 @@
|
||||
"count": 2
|
||||
}
|
||||
},
|
||||
"public/app/core/actions/index.ts": {
|
||||
"no-barrel-files/no-barrel-files": {
|
||||
"count": 4
|
||||
}
|
||||
},
|
||||
"public/app/core/components/AccessControl/PermissionList.tsx": {
|
||||
"no-restricted-syntax": {
|
||||
"count": 1
|
||||
@@ -2610,11 +2605,6 @@
|
||||
"count": 2
|
||||
}
|
||||
},
|
||||
"public/app/features/explore/hooks/useStateSync/index.ts": {
|
||||
"no-barrel-files/no-barrel-files": {
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/features/explore/spec/helper/setup.tsx": {
|
||||
"@typescript-eslint/no-explicit-any": {
|
||||
"count": 1
|
||||
@@ -4030,11 +4020,6 @@
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/plugins/datasource/parca/webpack.config.ts": {
|
||||
"no-barrel-files/no-barrel-files": {
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/plugins/datasource/prometheus/configuration/AzureAuthSettings.tsx": {
|
||||
"no-restricted-syntax": {
|
||||
"count": 1
|
||||
@@ -4103,7 +4088,7 @@
|
||||
"count": 1
|
||||
},
|
||||
"@typescript-eslint/no-explicit-any": {
|
||||
"count": 2
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/plugins/datasource/tempo/resultTransformer.ts": {
|
||||
|
||||
@@ -585,6 +585,8 @@ module.exports = [
|
||||
// FIXME: Remove once all enterprise issues are fixed -
|
||||
// we don't have a suppressions file/approach for enterprise code yet
|
||||
...enterpriseIgnores,
|
||||
// Ignore decoupled plugin webpack configs
|
||||
'public/app/**/webpack.config.ts',
|
||||
],
|
||||
rules: {
|
||||
'no-barrel-files/no-barrel-files': 'error',
|
||||
|
||||
@@ -246,6 +246,8 @@ const injectedRtkApi = api
|
||||
facetLimit: queryArg.facetLimit,
|
||||
tags: queryArg.tags,
|
||||
libraryPanel: queryArg.libraryPanel,
|
||||
panelType: queryArg.panelType,
|
||||
dataSourceType: queryArg.dataSourceType,
|
||||
permission: queryArg.permission,
|
||||
sort: queryArg.sort,
|
||||
limit: queryArg.limit,
|
||||
@@ -674,6 +676,10 @@ export type SearchDashboardsAndFoldersApiArg = {
|
||||
tags?: string[];
|
||||
/** find dashboards that reference a given libraryPanel */
|
||||
libraryPanel?: string;
|
||||
/** find dashboards using panels of a given plugin type */
|
||||
panelType?: string;
|
||||
/** find dashboards using datasources of a given plugin type */
|
||||
dataSourceType?: string;
|
||||
/** permission needed for the resource (view, edit, admin) */
|
||||
permission?: 'view' | 'edit' | 'admin';
|
||||
/** sortable field */
|
||||
|
||||
@@ -657,10 +657,6 @@ export interface FeatureToggles {
|
||||
*/
|
||||
rolePickerDrawer?: boolean;
|
||||
/**
|
||||
* Enable unified storage search
|
||||
*/
|
||||
unifiedStorageSearch?: boolean;
|
||||
/**
|
||||
* Enable sprinkles on unified storage search
|
||||
*/
|
||||
unifiedStorageSearchSprinkles?: boolean;
|
||||
@@ -957,7 +953,8 @@ export interface FeatureToggles {
|
||||
*/
|
||||
alertingBulkActionsInUI?: boolean;
|
||||
/**
|
||||
* Registers AuthZ /apis endpoint
|
||||
* Deprecated: Use kubernetesAuthzCoreRolesApi, kubernetesAuthzRolesApi, and kubernetesAuthzRoleBindingsApi instead
|
||||
* @deprecated
|
||||
*/
|
||||
kubernetesAuthzApis?: boolean;
|
||||
/**
|
||||
@@ -973,6 +970,18 @@ export interface FeatureToggles {
|
||||
*/
|
||||
kubernetesAuthzZanzanaSync?: boolean;
|
||||
/**
|
||||
* Registers AuthZ Core Roles /apis endpoint
|
||||
*/
|
||||
kubernetesAuthzCoreRolesApi?: boolean;
|
||||
/**
|
||||
* Registers AuthZ Roles /apis endpoint
|
||||
*/
|
||||
kubernetesAuthzRolesApi?: boolean;
|
||||
/**
|
||||
* Registers AuthZ Role Bindings /apis endpoint
|
||||
*/
|
||||
kubernetesAuthzRoleBindingsApi?: boolean;
|
||||
/**
|
||||
* Enables create, delete, and update mutations for resources owned by IAM identity
|
||||
*/
|
||||
kubernetesAuthnMutation?: boolean;
|
||||
@@ -1124,11 +1133,6 @@ export interface FeatureToggles {
|
||||
*/
|
||||
pluginContainers?: boolean;
|
||||
/**
|
||||
* Run search queries through the tempo backend
|
||||
* @default false
|
||||
*/
|
||||
tempoSearchBackendMigration?: boolean;
|
||||
/**
|
||||
* Prioritize loading plugins from the CDN before other sources
|
||||
* @default false
|
||||
*/
|
||||
|
||||
@@ -52,6 +52,7 @@ export const availableIconsIndex = {
|
||||
bookmark: true,
|
||||
'book-open': true,
|
||||
'brackets-curly': true,
|
||||
brain: true,
|
||||
'browser-alt': true,
|
||||
bug: true,
|
||||
building: true,
|
||||
|
||||
@@ -153,6 +153,10 @@ interface BaseProps<TableData extends object> {
|
||||
* Optional way to set how the table is sorted from the beginning. Must be memoized.
|
||||
*/
|
||||
initialSortBy?: Array<SortingRule<TableData>>;
|
||||
/**
|
||||
* Disable the ability to remove sorting on columns (none -> asc -> desc -> asc)
|
||||
*/
|
||||
disableSortRemove?: boolean;
|
||||
}
|
||||
|
||||
interface WithExpandableRow<TableData extends object> extends BaseProps<TableData> {
|
||||
@@ -191,6 +195,7 @@ export function InteractiveTable<TableData extends object>({
|
||||
showExpandAll = false,
|
||||
fetchData,
|
||||
initialSortBy = [],
|
||||
disableSortRemove,
|
||||
}: Props<TableData>) {
|
||||
const styles = useStyles2(getStyles);
|
||||
const tableColumns = useMemo(() => {
|
||||
@@ -222,6 +227,7 @@ export function InteractiveTable<TableData extends object>({
|
||||
disableMultiSort: true,
|
||||
// If fetchData is provided, we disable client-side sorting
|
||||
manualSortBy: Boolean(fetchData),
|
||||
disableSortRemove,
|
||||
getRowId,
|
||||
initialState: {
|
||||
hiddenColumns: [
|
||||
|
||||
@@ -26,4 +26,8 @@ export interface Column<TableData extends object> {
|
||||
* If the provided function returns `false` the column will be hidden.
|
||||
*/
|
||||
visible?: (data: TableData[]) => boolean;
|
||||
/**
|
||||
* Determines starting sort direction when the column header is clicked.
|
||||
*/
|
||||
sortDescFirst?: boolean;
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ export function getColumns<K extends object>(
|
||||
disableSortBy: !Boolean(column.sortType),
|
||||
width: column.disableGrow ? 0 : undefined,
|
||||
visible: column.visible,
|
||||
...(column.sortDescFirst !== undefined && { sortDescFirst: column.sortDescFirst }),
|
||||
...(column.cell && { Cell: column.cell }),
|
||||
})),
|
||||
];
|
||||
|
||||
@@ -1,26 +1,55 @@
|
||||
package generic
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/generic"
|
||||
"k8s.io/apiserver/pkg/registry/generic/registry"
|
||||
"k8s.io/apiserver/pkg/storage"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
)
|
||||
|
||||
// SelectableFieldsOptions allows customizing field selector behavior for a resource.
|
||||
type SelectableFieldsOptions struct {
|
||||
// GetAttrs returns labels and fields for the object.
|
||||
// If nil, the default GetAttrs is used which only exposes metadata.name.
|
||||
GetAttrs func(obj runtime.Object) (labels.Set, fields.Set, error)
|
||||
}
|
||||
|
||||
func NewRegistryStore(scheme *runtime.Scheme, resourceInfo utils.ResourceInfo, optsGetter generic.RESTOptionsGetter) (*registry.Store, error) {
|
||||
return NewRegistryStoreWithSelectableFields(scheme, resourceInfo, optsGetter, SelectableFieldsOptions{})
|
||||
}
|
||||
|
||||
// NewRegistryStoreWithSelectableFields creates a registry store with custom selectable fields support.
|
||||
// Use this when you need to filter resources by custom fields like spec.connection.name.
|
||||
func NewRegistryStoreWithSelectableFields(scheme *runtime.Scheme, resourceInfo utils.ResourceInfo, optsGetter generic.RESTOptionsGetter, fieldOpts SelectableFieldsOptions) (*registry.Store, error) {
|
||||
gv := resourceInfo.GroupVersion()
|
||||
gv.Version = runtime.APIVersionInternal
|
||||
strategy := NewStrategy(scheme, gv)
|
||||
if resourceInfo.IsClusterScoped() {
|
||||
strategy = strategy.WithClusterScope()
|
||||
}
|
||||
|
||||
// Use custom GetAttrs if provided, otherwise use default
|
||||
var attrFunc storage.AttrFunc
|
||||
var predicateFunc func(label labels.Selector, field fields.Selector) storage.SelectionPredicate
|
||||
if fieldOpts.GetAttrs != nil {
|
||||
attrFunc = fieldOpts.GetAttrs
|
||||
// Pass nil predicateFunc to use default behavior with custom attrFunc
|
||||
predicateFunc = nil
|
||||
} else {
|
||||
attrFunc = GetAttrs
|
||||
predicateFunc = Matcher
|
||||
}
|
||||
|
||||
store := ®istry.Store{
|
||||
NewFunc: resourceInfo.NewFunc,
|
||||
NewListFunc: resourceInfo.NewListFunc,
|
||||
KeyRootFunc: KeyRootFunc(resourceInfo.GroupResource()),
|
||||
KeyFunc: NamespaceKeyFunc(resourceInfo.GroupResource()),
|
||||
PredicateFunc: Matcher,
|
||||
PredicateFunc: predicateFunc,
|
||||
DefaultQualifiedResource: resourceInfo.GroupResource(),
|
||||
SingularQualifiedResource: resourceInfo.SingularGroupResource(),
|
||||
TableConvertor: resourceInfo.TableConverter(),
|
||||
@@ -28,7 +57,7 @@ func NewRegistryStore(scheme *runtime.Scheme, resourceInfo utils.ResourceInfo, o
|
||||
UpdateStrategy: strategy,
|
||||
DeleteStrategy: strategy,
|
||||
}
|
||||
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: GetAttrs}
|
||||
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: attrFunc}
|
||||
if err := store.CompleteWithOptions(options); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
20
pkg/build/e2e/README.md
Normal file
20
pkg/build/e2e/README.md
Normal file
@@ -0,0 +1,20 @@
|
||||
## Build artifacts
|
||||
|
||||
Put the resulting tar in your `grafana` OSS path:
|
||||
```sh
|
||||
go -C grafana run ./pkg/build/cmd artifacts -a targz:enterprise:linux/amd64 --alpine-base=alpine:3.22 --tag-format='{{ .version }}-{{ .buildID }}-{{ .arch }}' --grafana-dir="${PWD}/grafana" --enterprise-dir="${PWD}/grafana-enterprise"
|
||||
```
|
||||
|
||||
Also build the e2e test runner:
|
||||
```sh
|
||||
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o ./e2e-runner ./e2e/
|
||||
```
|
||||
|
||||
And then `chmod +x ./e2e-runner`.
|
||||
|
||||
## Running tests
|
||||
|
||||
Reporting tests with Image Renderer:
|
||||
```sh
|
||||
go run ./pkg/build/e2e --suite=e2e/extensions/enterprise/smtp-suite --license=e2e/extensions/enterprise/license.jwt --image-renderer
|
||||
```
|
||||
@@ -138,6 +138,10 @@ func run(ctx context.Context, cmd *cli.Command) error {
|
||||
}
|
||||
|
||||
if code != 0 {
|
||||
if stdout, _ := c.Stdout(ctx); len(stdout) > 0 {
|
||||
log.Printf("e2e test suite stdout:\n%s", stdout)
|
||||
}
|
||||
|
||||
return fmt.Errorf("e2e tests failed with exit code %d", code)
|
||||
}
|
||||
|
||||
|
||||
@@ -8,10 +8,10 @@ import (
|
||||
|
||||
func RunSuite(d *dagger.Client, svc *dagger.Service, src *dagger.Directory, cache *dagger.CacheVolume, suite, runnerFlags string) *dagger.Container {
|
||||
command := fmt.Sprintf(
|
||||
"./e2e-runner cypress --start-grafana=false --cypress-video"+
|
||||
"./e2e-runner cypress --browser=electron --start-grafana=false --cypress-video"+
|
||||
" --grafana-base-url http://grafana:3001 --suite %s %s", suite, runnerFlags)
|
||||
|
||||
return WithYarnCache(WithGrafanaFrontend(d.Container().From("cypress/included:13.1.0"), src), cache).
|
||||
return WithYarnCache(WithGrafanaFrontend(d.Container().From("cypress/included:14.3.2"), src), cache).
|
||||
WithWorkdir("/src").
|
||||
WithServiceBinding("grafana", svc).
|
||||
WithExec([]string{"yarn", "install", "--immutable"}).
|
||||
|
||||
@@ -99,13 +99,15 @@ func GrafanaService(ctx context.Context, d *dagger.Client, opts GrafanaServiceOp
|
||||
}
|
||||
|
||||
if opts.StartImageRenderer {
|
||||
container = container.WithEnvVariable("START_IMAGE_RENDERER", "true").
|
||||
WithExec([]string{"apt-get", "update"}).
|
||||
WithExec([]string{"apt-get", "install", "-y", "ca-certificates"})
|
||||
imageRendererSvc := d.Container().From("grafana/grafana-image-renderer:" + opts.ImageRendererVersion).
|
||||
WithExposedPort(8081).
|
||||
AsService()
|
||||
|
||||
if opts.ImageRendererVersion != "" {
|
||||
container = container.WithEnvVariable("IMAGE_RENDERER_VERSION", opts.ImageRendererVersion)
|
||||
}
|
||||
container = container.WithServiceBinding("image-renderer", imageRendererSvc).
|
||||
WithExec([]string{"apt-get", "update"}).
|
||||
WithExec([]string{"apt-get", "install", "-y", "ca-certificates"}).
|
||||
WithEnvVariable("GF_RENDERING_CALLBACK_URL", "http://grafana:3001/").
|
||||
WithEnvVariable("GF_RENDERING_SERVER_URL", "http://image-renderer:8081/render")
|
||||
}
|
||||
|
||||
// We add all GF_ environment variables to allow for overriding Grafana configuration.
|
||||
|
||||
@@ -10,6 +10,7 @@ const (
|
||||
SearchServerRing string = "search-server-ring"
|
||||
SearchServerDistributor string = "search-server-distributor"
|
||||
StorageServer string = "storage-server"
|
||||
SearchServer string = "search-server"
|
||||
ZanzanaServer string = "zanzana-server"
|
||||
InstrumentationServer string = "instrumentation-server"
|
||||
FrontendServer string = "frontend-server"
|
||||
@@ -21,6 +22,7 @@ var dependencyMap = map[string][]string{
|
||||
SearchServerRing: {InstrumentationServer, MemberlistKV},
|
||||
GrafanaAPIServer: {InstrumentationServer},
|
||||
StorageServer: {InstrumentationServer, SearchServerRing},
|
||||
SearchServer: {InstrumentationServer, SearchServerRing},
|
||||
ZanzanaServer: {InstrumentationServer},
|
||||
SearchServerDistributor: {InstrumentationServer, MemberlistKV, SearchServerRing},
|
||||
Core: {},
|
||||
|
||||
@@ -38,9 +38,9 @@ func (d *directResourceClient) GetBlob(ctx context.Context, in *resourcepb.GetBl
|
||||
return d.server.GetBlob(ctx, in)
|
||||
}
|
||||
|
||||
// GetStats implements ResourceClient.
|
||||
// GetStats implements ResourceClient (SearchClient).
|
||||
func (d *directResourceClient) GetStats(ctx context.Context, in *resourcepb.ResourceStatsRequest, opts ...grpc.CallOption) (*resourcepb.ResourceStatsResponse, error) {
|
||||
return d.server.GetStats(ctx, in)
|
||||
return nil, fmt.Errorf("GetStats not supported with direct resource client")
|
||||
}
|
||||
|
||||
// IsHealthy implements ResourceClient.
|
||||
@@ -53,12 +53,14 @@ func (d *directResourceClient) List(ctx context.Context, in *resourcepb.ListRequ
|
||||
return d.server.List(ctx, in)
|
||||
}
|
||||
|
||||
// ListManagedObjects implements ResourceClient (SearchClient).
|
||||
func (d *directResourceClient) ListManagedObjects(ctx context.Context, in *resourcepb.ListManagedObjectsRequest, opts ...grpc.CallOption) (*resourcepb.ListManagedObjectsResponse, error) {
|
||||
return d.server.ListManagedObjects(ctx, in)
|
||||
return nil, fmt.Errorf("ListManagedObjects not supported with direct resource client")
|
||||
}
|
||||
|
||||
// CountManagedObjects implements ResourceClient (SearchClient).
|
||||
func (d *directResourceClient) CountManagedObjects(ctx context.Context, in *resourcepb.CountManagedObjectsRequest, opts ...grpc.CallOption) (*resourcepb.CountManagedObjectsResponse, error) {
|
||||
return d.server.CountManagedObjects(ctx, in)
|
||||
return nil, fmt.Errorf("CountManagedObjects not supported with direct resource client")
|
||||
}
|
||||
|
||||
// PutBlob implements ResourceClient.
|
||||
@@ -71,9 +73,9 @@ func (d *directResourceClient) Read(ctx context.Context, in *resourcepb.ReadRequ
|
||||
return d.server.Read(ctx, in)
|
||||
}
|
||||
|
||||
// Search implements ResourceClient.
|
||||
// Search implements ResourceClient (SearchClient).
|
||||
func (d *directResourceClient) Search(ctx context.Context, in *resourcepb.ResourceSearchRequest, opts ...grpc.CallOption) (*resourcepb.ResourceSearchResponse, error) {
|
||||
return d.server.Search(ctx, in)
|
||||
return nil, fmt.Errorf("Search not supported with direct resource client")
|
||||
}
|
||||
|
||||
// Update implements ResourceClient.
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
|
||||
"github.com/grafana/authlib/types"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
|
||||
grafanarest "github.com/grafana/grafana/pkg/apiserver/rest"
|
||||
|
||||
@@ -142,6 +142,24 @@ func (s *SearchHandler) GetAPIRoutes(defs map[string]common.OpenAPIDefinition) *
|
||||
Schema: spec.StringProperty(),
|
||||
},
|
||||
},
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "panelType",
|
||||
In: "query",
|
||||
Description: "find dashboards using panels of a given plugin type",
|
||||
Required: false,
|
||||
Schema: spec.StringProperty(),
|
||||
},
|
||||
},
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "dataSourceType",
|
||||
In: "query",
|
||||
Description: "find dashboards using datasources of a given plugin type",
|
||||
Required: false,
|
||||
Schema: spec.StringProperty(),
|
||||
},
|
||||
},
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "permission",
|
||||
@@ -430,14 +448,11 @@ func convertHttpSearchRequestToResourceSearchRequest(queryParams url.Values, use
|
||||
}
|
||||
}
|
||||
|
||||
// The facet term fields
|
||||
// Apply facet terms
|
||||
if facets, ok := queryParams["facet"]; ok {
|
||||
if queryParams.Has("facetLimit") {
|
||||
if parsed, err := strconv.Atoi(queryParams.Get("facetLimit")); err == nil && parsed > 0 {
|
||||
facetLimit = parsed
|
||||
if facetLimit > 1000 {
|
||||
facetLimit = 1000
|
||||
}
|
||||
facetLimit = min(parsed, 1000)
|
||||
}
|
||||
}
|
||||
searchRequest.Facet = make(map[string]*resourcepb.ResourceSearchRequest_Facet)
|
||||
@@ -449,21 +464,35 @@ func convertHttpSearchRequestToResourceSearchRequest(queryParams url.Values, use
|
||||
}
|
||||
}
|
||||
|
||||
// The tags filter
|
||||
if tags, ok := queryParams["tag"]; ok {
|
||||
if v, ok := queryParams["tag"]; ok {
|
||||
searchRequest.Options.Fields = append(searchRequest.Options.Fields, &resourcepb.Requirement{
|
||||
Key: "tags",
|
||||
Operator: "=",
|
||||
Values: tags,
|
||||
Values: v,
|
||||
})
|
||||
}
|
||||
|
||||
// The libraryPanel filter
|
||||
if libraryPanel, ok := queryParams["libraryPanel"]; ok {
|
||||
if v, ok := queryParams["panelType"]; ok {
|
||||
searchRequest.Options.Fields = append(searchRequest.Options.Fields, &resourcepb.Requirement{
|
||||
Key: resource.SEARCH_FIELD_PREFIX + builders.DASHBOARD_PANEL_TYPES,
|
||||
Operator: "=",
|
||||
Values: v,
|
||||
})
|
||||
}
|
||||
|
||||
if v, ok := queryParams["dataSourceType"]; ok {
|
||||
searchRequest.Options.Fields = append(searchRequest.Options.Fields, &resourcepb.Requirement{
|
||||
Key: resource.SEARCH_FIELD_PREFIX + builders.DASHBOARD_DS_TYPES,
|
||||
Operator: "=",
|
||||
Values: v,
|
||||
})
|
||||
}
|
||||
|
||||
if v, ok := queryParams["libraryPanel"]; ok {
|
||||
searchRequest.Options.Fields = append(searchRequest.Options.Fields, &resourcepb.Requirement{
|
||||
Key: builders.DASHBOARD_LIBRARY_PANEL_REFERENCE,
|
||||
Operator: "=",
|
||||
Values: libraryPanel,
|
||||
Values: v,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -37,7 +37,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/services/folder"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/apistore"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
|
||||
)
|
||||
|
||||
@@ -74,7 +73,7 @@ func RegisterAPIService(cfg *setting.Cfg,
|
||||
acService accesscontrol.Service,
|
||||
accessClient authlib.AccessClient,
|
||||
registerer prometheus.Registerer,
|
||||
unified resource.ResourceClient,
|
||||
unified resourcepb.ResourceIndexClient,
|
||||
zanzanaClient zanzana.Client,
|
||||
) *FolderAPIBuilder {
|
||||
builder := &FolderAPIBuilder{
|
||||
@@ -93,7 +92,7 @@ func RegisterAPIService(cfg *setting.Cfg,
|
||||
return builder
|
||||
}
|
||||
|
||||
func NewAPIService(ac authlib.AccessClient, searcher resource.ResourceClient, features featuremgmt.FeatureToggles, zanzanaClient zanzana.Client, resourcePermissionsSvc *dynamic.NamespaceableResourceInterface) *FolderAPIBuilder {
|
||||
func NewAPIService(ac authlib.AccessClient, searcher resourcepb.ResourceIndexClient, features featuremgmt.FeatureToggles, zanzanaClient zanzana.Client, resourcePermissionsSvc *dynamic.NamespaceableResourceInterface) *FolderAPIBuilder {
|
||||
return &FolderAPIBuilder{
|
||||
features: features,
|
||||
accessClient: ac,
|
||||
|
||||
@@ -5,7 +5,9 @@ import (
|
||||
"fmt"
|
||||
"maps"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/open-feature/go-sdk/openfeature"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -209,8 +211,16 @@ func (b *IdentityAccessManagementAPIBuilder) GetGroupVersion() schema.GroupVersi
|
||||
}
|
||||
|
||||
func (b *IdentityAccessManagementAPIBuilder) InstallSchema(scheme *runtime.Scheme) error {
|
||||
//nolint:staticcheck // not yet migrated to OpenFeature
|
||||
if b.features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthzApis) {
|
||||
client := openfeature.NewDefaultClient()
|
||||
ctx, cancelFn := context.WithTimeout(context.Background(), time.Second*5)
|
||||
defer cancelFn()
|
||||
|
||||
// Check if any of the AuthZ APIs are enabled
|
||||
enableCoreRolesApi := client.Boolean(ctx, featuremgmt.FlagKubernetesAuthzCoreRolesApi, false, openfeature.TransactionContext(ctx))
|
||||
enableRolesApi := client.Boolean(ctx, featuremgmt.FlagKubernetesAuthzRolesApi, false, openfeature.TransactionContext(ctx))
|
||||
enableRoleBindingsApi := client.Boolean(ctx, featuremgmt.FlagKubernetesAuthzRoleBindingsApi, false, openfeature.TransactionContext(ctx))
|
||||
|
||||
if enableCoreRolesApi || enableRolesApi || enableRoleBindingsApi {
|
||||
if err := iamv0.AddAuthZKnownTypes(scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -244,10 +254,16 @@ func (b *IdentityAccessManagementAPIBuilder) AllowedV0Alpha1Resources() []string
|
||||
func (b *IdentityAccessManagementAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupInfo, opts builder.APIGroupOptions) error {
|
||||
storage := map[string]rest.Storage{}
|
||||
|
||||
client := openfeature.NewDefaultClient()
|
||||
ctx, cancelFn := context.WithTimeout(context.Background(), time.Second*5)
|
||||
defer cancelFn()
|
||||
|
||||
//nolint:staticcheck // not yet migrated to OpenFeature
|
||||
enableZanzanaSync := b.features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthzZanzanaSync)
|
||||
//nolint:staticcheck // not yet migrated to OpenFeature
|
||||
enableAuthzApis := b.features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthzApis)
|
||||
|
||||
enableCoreRolesApi := client.Boolean(ctx, featuremgmt.FlagKubernetesAuthzCoreRolesApi, false, openfeature.TransactionContext(ctx))
|
||||
enableRolesApi := client.Boolean(ctx, featuremgmt.FlagKubernetesAuthzRolesApi, false, openfeature.TransactionContext(ctx))
|
||||
enableRoleBindingsApi := client.Boolean(ctx, featuremgmt.FlagKubernetesAuthzRoleBindingsApi, false, openfeature.TransactionContext(ctx))
|
||||
|
||||
// teams + users must have shorter names because they are often used as part of another name
|
||||
opts.StorageOptsRegister(iamv0.TeamResourceInfo.GroupResource(), apistore.StorageOptions{
|
||||
@@ -283,17 +299,21 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *ge
|
||||
return err
|
||||
}
|
||||
|
||||
if enableAuthzApis {
|
||||
if enableCoreRolesApi {
|
||||
// v0alpha1
|
||||
if err := b.UpdateCoreRolesAPIGroup(apiGroupInfo, opts, storage, enableZanzanaSync); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if enableRolesApi {
|
||||
// Role registration is delegated to the RoleApiInstaller
|
||||
if err := b.roleApiInstaller.RegisterStorage(apiGroupInfo, &opts, storage); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if enableRoleBindingsApi {
|
||||
if err := b.UpdateRoleBindingsAPIGroup(apiGroupInfo, opts, storage, enableZanzanaSync); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -722,7 +742,7 @@ func NewLocalStore(resourceInfo utils.ResourceInfo, scheme *runtime.Scheme, defa
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := resource.NewLocalResourceClient(server)
|
||||
client := resource.NewLocalResourceClient(server, nil)
|
||||
optsGetter := apistore.NewRESTOptionsGetterForClient(client, nil, defaultOpts.StorageConfig.Config, nil)
|
||||
|
||||
store, err := grafanaregistry.NewRegistryStore(scheme, resourceInfo, optsGetter)
|
||||
|
||||
@@ -71,6 +71,98 @@ func (_c *MockJobProgressRecorder_Complete_Call) RunAndReturn(run func(context.C
|
||||
return _c
|
||||
}
|
||||
|
||||
// HasDirPathFailedDeletion provides a mock function with given fields: folderPath
|
||||
func (_m *MockJobProgressRecorder) HasDirPathFailedDeletion(folderPath string) bool {
|
||||
ret := _m.Called(folderPath)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for HasDirPathFailedDeletion")
|
||||
}
|
||||
|
||||
var r0 bool
|
||||
if rf, ok := ret.Get(0).(func(string) bool); ok {
|
||||
r0 = rf(folderPath)
|
||||
} else {
|
||||
r0 = ret.Get(0).(bool)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockJobProgressRecorder_HasDirPathFailedDeletion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HasDirPathFailedDeletion'
|
||||
type MockJobProgressRecorder_HasDirPathFailedDeletion_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// HasDirPathFailedDeletion is a helper method to define mock.On call
|
||||
// - folderPath string
|
||||
func (_e *MockJobProgressRecorder_Expecter) HasDirPathFailedDeletion(folderPath interface{}) *MockJobProgressRecorder_HasDirPathFailedDeletion_Call {
|
||||
return &MockJobProgressRecorder_HasDirPathFailedDeletion_Call{Call: _e.mock.On("HasDirPathFailedDeletion", folderPath)}
|
||||
}
|
||||
|
||||
func (_c *MockJobProgressRecorder_HasDirPathFailedDeletion_Call) Run(run func(folderPath string)) *MockJobProgressRecorder_HasDirPathFailedDeletion_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockJobProgressRecorder_HasDirPathFailedDeletion_Call) Return(_a0 bool) *MockJobProgressRecorder_HasDirPathFailedDeletion_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockJobProgressRecorder_HasDirPathFailedDeletion_Call) RunAndReturn(run func(string) bool) *MockJobProgressRecorder_HasDirPathFailedDeletion_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// HasDirPathFailedCreation provides a mock function with given fields: path
|
||||
func (_m *MockJobProgressRecorder) HasDirPathFailedCreation(path string) bool {
|
||||
ret := _m.Called(path)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for HasDirPathFailedCreation")
|
||||
}
|
||||
|
||||
var r0 bool
|
||||
if rf, ok := ret.Get(0).(func(string) bool); ok {
|
||||
r0 = rf(path)
|
||||
} else {
|
||||
r0 = ret.Get(0).(bool)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockJobProgressRecorder_HasDirPathFailedCreation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HasDirPathFailedCreation'
|
||||
type MockJobProgressRecorder_HasDirPathFailedCreation_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// HasDirPathFailedCreation is a helper method to define mock.On call
|
||||
// - path string
|
||||
func (_e *MockJobProgressRecorder_Expecter) HasDirPathFailedCreation(path interface{}) *MockJobProgressRecorder_HasDirPathFailedCreation_Call {
|
||||
return &MockJobProgressRecorder_HasDirPathFailedCreation_Call{Call: _e.mock.On("HasDirPathFailedCreation", path)}
|
||||
}
|
||||
|
||||
func (_c *MockJobProgressRecorder_HasDirPathFailedCreation_Call) Run(run func(path string)) *MockJobProgressRecorder_HasDirPathFailedCreation_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockJobProgressRecorder_HasDirPathFailedCreation_Call) Return(_a0 bool) *MockJobProgressRecorder_HasDirPathFailedCreation_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockJobProgressRecorder_HasDirPathFailedCreation_Call) RunAndReturn(run func(string) bool) *MockJobProgressRecorder_HasDirPathFailedCreation_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Record provides a mock function with given fields: ctx, result
|
||||
func (_m *MockJobProgressRecorder) Record(ctx context.Context, result JobResourceResult) {
|
||||
_m.Called(ctx, result)
|
||||
|
||||
@@ -2,6 +2,7 @@ package jobs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -9,6 +10,8 @@ import (
|
||||
"github.com/grafana/grafana-app-sdk/logging"
|
||||
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/apps/provisioning/pkg/repository"
|
||||
"github.com/grafana/grafana/apps/provisioning/pkg/safepath"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
|
||||
)
|
||||
|
||||
// maybeNotifyProgress will only notify if a certain amount of time has passed
|
||||
@@ -58,6 +61,8 @@ type jobProgressRecorder struct {
|
||||
notifyImmediatelyFn ProgressFn
|
||||
maybeNotifyFn ProgressFn
|
||||
summaries map[string]*provisioning.JobResourceSummary
|
||||
failedCreations []string // Tracks folder paths that failed to be created
|
||||
failedDeletions []string // Tracks resource paths that failed to be deleted
|
||||
}
|
||||
|
||||
func newJobProgressRecorder(ProgressFn ProgressFn) JobProgressRecorder {
|
||||
@@ -84,10 +89,26 @@ func (r *jobProgressRecorder) Record(ctx context.Context, result JobResourceResu
|
||||
if result.Error != nil {
|
||||
shouldLogError = true
|
||||
logErr = result.Error
|
||||
if len(r.errors) < 20 {
|
||||
r.errors = append(r.errors, result.Error.Error())
|
||||
|
||||
// Don't count ignored actions as errors in error count or error list
|
||||
if result.Action != repository.FileActionIgnored {
|
||||
if len(r.errors) < 20 {
|
||||
r.errors = append(r.errors, result.Error.Error())
|
||||
}
|
||||
r.errorCount++
|
||||
}
|
||||
|
||||
// Automatically track failed operations based on error type and action
|
||||
// Check if this is a PathCreationError (folder creation failure)
|
||||
var pathErr *resources.PathCreationError
|
||||
if errors.As(result.Error, &pathErr) {
|
||||
r.failedCreations = append(r.failedCreations, pathErr.Path)
|
||||
}
|
||||
|
||||
// Track failed deletions, any deletion will stop the deletion of the parent folder (as it won't be empty)
|
||||
if result.Action == repository.FileActionDeleted {
|
||||
r.failedDeletions = append(r.failedDeletions, result.Path)
|
||||
}
|
||||
r.errorCount++
|
||||
}
|
||||
|
||||
r.updateSummary(result)
|
||||
@@ -112,6 +133,8 @@ func (r *jobProgressRecorder) ResetResults() {
|
||||
r.errorCount = 0
|
||||
r.errors = nil
|
||||
r.summaries = make(map[string]*provisioning.JobResourceSummary)
|
||||
r.failedCreations = nil
|
||||
r.failedDeletions = nil
|
||||
}
|
||||
|
||||
func (r *jobProgressRecorder) SetMessage(ctx context.Context, msg string) {
|
||||
@@ -309,3 +332,29 @@ func (r *jobProgressRecorder) Complete(ctx context.Context, err error) provision
|
||||
|
||||
return jobStatus
|
||||
}
|
||||
|
||||
// HasDirPathFailedCreation checks if a path is nested under any failed folder creation
|
||||
func (r *jobProgressRecorder) HasDirPathFailedCreation(path string) bool {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
|
||||
for _, failedCreation := range r.failedCreations {
|
||||
if safepath.InDir(path, failedCreation) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// HasDirPathFailedDeletion checks if any resource deletions failed under a folder path
|
||||
func (r *jobProgressRecorder) HasDirPathFailedDeletion(folderPath string) bool {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
|
||||
for _, failedDeletion := range r.failedDeletions {
|
||||
if safepath.InDir(failedDeletion, folderPath) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/apps/provisioning/pkg/repository"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -252,3 +253,221 @@ func TestJobProgressRecorderWarningOnlyNoErrors(t *testing.T) {
|
||||
require.NotNil(t, finalStatus.Warnings)
|
||||
assert.Len(t, finalStatus.Warnings, 1)
|
||||
}
|
||||
|
||||
func TestJobProgressRecorderFolderFailureTracking(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a progress recorder
|
||||
mockProgressFn := func(ctx context.Context, status provisioning.JobStatus) error {
|
||||
return nil
|
||||
}
|
||||
recorder := newJobProgressRecorder(mockProgressFn).(*jobProgressRecorder)
|
||||
|
||||
// Record a folder creation failure with PathCreationError
|
||||
pathErr := &resources.PathCreationError{
|
||||
Path: "folder1/",
|
||||
Err: assert.AnError,
|
||||
}
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder1/file.json",
|
||||
Action: repository.FileActionCreated,
|
||||
Error: pathErr,
|
||||
})
|
||||
|
||||
// Record another PathCreationError for a different folder
|
||||
pathErr2 := &resources.PathCreationError{
|
||||
Path: "folder2/subfolder/",
|
||||
Err: assert.AnError,
|
||||
}
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder2/subfolder/file.json",
|
||||
Action: repository.FileActionCreated,
|
||||
Error: pathErr2,
|
||||
})
|
||||
|
||||
// Record a deletion failure
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder3/file1.json",
|
||||
Action: repository.FileActionDeleted,
|
||||
Error: assert.AnError,
|
||||
})
|
||||
|
||||
// Record another deletion failure
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder4/subfolder/file2.json",
|
||||
Action: repository.FileActionDeleted,
|
||||
Error: assert.AnError,
|
||||
})
|
||||
|
||||
// Verify failed creations are tracked
|
||||
recorder.mu.RLock()
|
||||
assert.Len(t, recorder.failedCreations, 2)
|
||||
assert.Contains(t, recorder.failedCreations, "folder1/")
|
||||
assert.Contains(t, recorder.failedCreations, "folder2/subfolder/")
|
||||
|
||||
// Verify failed deletions are tracked
|
||||
assert.Len(t, recorder.failedDeletions, 2)
|
||||
assert.Contains(t, recorder.failedDeletions, "folder3/file1.json")
|
||||
assert.Contains(t, recorder.failedDeletions, "folder4/subfolder/file2.json")
|
||||
recorder.mu.RUnlock()
|
||||
}
|
||||
|
||||
func TestJobProgressRecorderHasDirPathFailedCreation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a progress recorder
|
||||
mockProgressFn := func(ctx context.Context, status provisioning.JobStatus) error {
|
||||
return nil
|
||||
}
|
||||
recorder := newJobProgressRecorder(mockProgressFn).(*jobProgressRecorder)
|
||||
|
||||
// Add failed creations via Record
|
||||
pathErr1 := &resources.PathCreationError{
|
||||
Path: "folder1/",
|
||||
Err: assert.AnError,
|
||||
}
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder1/file.json",
|
||||
Action: repository.FileActionCreated,
|
||||
Error: pathErr1,
|
||||
})
|
||||
|
||||
pathErr2 := &resources.PathCreationError{
|
||||
Path: "folder2/subfolder/",
|
||||
Err: assert.AnError,
|
||||
}
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder2/subfolder/file.json",
|
||||
Action: repository.FileActionCreated,
|
||||
Error: pathErr2,
|
||||
})
|
||||
|
||||
// Test nested paths
|
||||
assert.True(t, recorder.HasDirPathFailedCreation("folder1/file.json"))
|
||||
assert.True(t, recorder.HasDirPathFailedCreation("folder1/nested/file.json"))
|
||||
assert.True(t, recorder.HasDirPathFailedCreation("folder2/subfolder/file.json"))
|
||||
|
||||
// Test non-nested paths
|
||||
assert.False(t, recorder.HasDirPathFailedCreation("folder2/file2.json"))
|
||||
assert.False(t, recorder.HasDirPathFailedCreation("folder2/othersubfolder/inside.json"))
|
||||
assert.False(t, recorder.HasDirPathFailedCreation("other/file.json"))
|
||||
assert.False(t, recorder.HasDirPathFailedCreation("folder3/file.json"))
|
||||
assert.False(t, recorder.HasDirPathFailedCreation("file.json"))
|
||||
}
|
||||
|
||||
func TestJobProgressRecorderHasDirPathFailedDeletion(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a progress recorder
|
||||
mockProgressFn := func(ctx context.Context, status provisioning.JobStatus) error {
|
||||
return nil
|
||||
}
|
||||
recorder := newJobProgressRecorder(mockProgressFn).(*jobProgressRecorder)
|
||||
|
||||
// Add failed deletions via Record
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder1/file1.json",
|
||||
Action: repository.FileActionDeleted,
|
||||
Error: assert.AnError,
|
||||
})
|
||||
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder2/subfolder/file2.json",
|
||||
Action: repository.FileActionDeleted,
|
||||
Error: assert.AnError,
|
||||
})
|
||||
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder3/nested/deep/file3.json",
|
||||
Action: repository.FileActionDeleted,
|
||||
Error: assert.AnError,
|
||||
})
|
||||
|
||||
// Test folder paths with failed deletions
|
||||
assert.True(t, recorder.HasDirPathFailedDeletion("folder1/"))
|
||||
assert.True(t, recorder.HasDirPathFailedDeletion("folder2/"))
|
||||
assert.True(t, recorder.HasDirPathFailedDeletion("folder2/subfolder/"))
|
||||
assert.True(t, recorder.HasDirPathFailedDeletion("folder3/"))
|
||||
assert.True(t, recorder.HasDirPathFailedDeletion("folder3/nested/"))
|
||||
assert.True(t, recorder.HasDirPathFailedDeletion("folder3/nested/deep/"))
|
||||
|
||||
// Test folder paths without failed deletions
|
||||
assert.False(t, recorder.HasDirPathFailedDeletion("other/"))
|
||||
assert.False(t, recorder.HasDirPathFailedDeletion("different/"))
|
||||
assert.False(t, recorder.HasDirPathFailedDeletion("folder2/othersubfolder/"))
|
||||
assert.False(t, recorder.HasDirPathFailedDeletion("folder2/subfolder/othersubfolder/"))
|
||||
assert.False(t, recorder.HasDirPathFailedDeletion("folder3/nested/anotherdeep/"))
|
||||
assert.False(t, recorder.HasDirPathFailedDeletion("folder3/nested/deep/insidedeep/"))
|
||||
}
|
||||
|
||||
func TestJobProgressRecorderResetResults(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a progress recorder
|
||||
mockProgressFn := func(ctx context.Context, status provisioning.JobStatus) error {
|
||||
return nil
|
||||
}
|
||||
recorder := newJobProgressRecorder(mockProgressFn).(*jobProgressRecorder)
|
||||
|
||||
// Add some data via Record
|
||||
pathErr := &resources.PathCreationError{
|
||||
Path: "folder1/",
|
||||
Err: assert.AnError,
|
||||
}
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder1/file.json",
|
||||
Action: repository.FileActionCreated,
|
||||
Error: pathErr,
|
||||
})
|
||||
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder2/file.json",
|
||||
Action: repository.FileActionDeleted,
|
||||
Error: assert.AnError,
|
||||
})
|
||||
|
||||
// Verify data is stored
|
||||
recorder.mu.RLock()
|
||||
assert.Len(t, recorder.failedCreations, 1)
|
||||
assert.Len(t, recorder.failedDeletions, 1)
|
||||
recorder.mu.RUnlock()
|
||||
|
||||
// Reset results
|
||||
recorder.ResetResults()
|
||||
|
||||
// Verify data is cleared
|
||||
recorder.mu.RLock()
|
||||
assert.Nil(t, recorder.failedCreations)
|
||||
assert.Nil(t, recorder.failedDeletions)
|
||||
recorder.mu.RUnlock()
|
||||
}
|
||||
|
||||
func TestJobProgressRecorderIgnoredActionsDontCountAsErrors(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a progress recorder
|
||||
mockProgressFn := func(ctx context.Context, status provisioning.JobStatus) error {
|
||||
return nil
|
||||
}
|
||||
recorder := newJobProgressRecorder(mockProgressFn).(*jobProgressRecorder)
|
||||
|
||||
// Record an ignored action with error
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder1/file1.json",
|
||||
Action: repository.FileActionIgnored,
|
||||
Error: assert.AnError,
|
||||
})
|
||||
|
||||
// Record a real error for comparison
|
||||
recorder.Record(ctx, JobResourceResult{
|
||||
Path: "folder2/file2.json",
|
||||
Action: repository.FileActionCreated,
|
||||
Error: assert.AnError,
|
||||
})
|
||||
|
||||
// Verify error count doesn't include ignored actions
|
||||
recorder.mu.RLock()
|
||||
assert.Equal(t, 1, recorder.errorCount, "ignored actions should not be counted as errors")
|
||||
assert.Len(t, recorder.errors, 1, "ignored action errors should not be in error list")
|
||||
recorder.mu.RUnlock()
|
||||
}
|
||||
|
||||
@@ -29,6 +29,10 @@ type JobProgressRecorder interface {
|
||||
StrictMaxErrors(maxErrors int)
|
||||
SetRefURLs(ctx context.Context, refURLs *provisioning.RepositoryURLs)
|
||||
Complete(ctx context.Context, err error) provisioning.JobStatus
|
||||
// HasDirPathFailedCreation checks if a path has any folder creations that failed
|
||||
HasDirPathFailedCreation(path string) bool
|
||||
// HasDirPathFailedDeletion checks if a folderPath has any folder deletions that failed
|
||||
HasDirPathFailedDeletion(folderPath string) bool
|
||||
}
|
||||
|
||||
// Worker is a worker that can process a job
|
||||
|
||||
@@ -75,11 +75,47 @@ func FullSync(
|
||||
return applyChanges(ctx, changes, clients, repositoryResources, progress, tracer, maxSyncWorkers, metrics)
|
||||
}
|
||||
|
||||
// shouldSkipChange checks if a change should be skipped based on previous failures on parent/child folders.
|
||||
// If there is a previous failure on the path, we don't need to process the change as it will fail anyway.
|
||||
func shouldSkipChange(ctx context.Context, change ResourceFileChange, progress jobs.JobProgressRecorder, tracer tracing.Tracer) bool {
|
||||
if change.Action != repository.FileActionDeleted && progress.HasDirPathFailedCreation(change.Path) {
|
||||
skipCtx, skipSpan := tracer.Start(ctx, "provisioning.sync.full.apply_changes.skip_nested_resource")
|
||||
skipSpan.SetAttributes(attribute.String("path", change.Path))
|
||||
progress.Record(skipCtx, jobs.JobResourceResult{
|
||||
Path: change.Path,
|
||||
Action: repository.FileActionIgnored,
|
||||
Warning: fmt.Errorf("resource was not processed because the parent folder could not be created"),
|
||||
})
|
||||
skipSpan.End()
|
||||
return true
|
||||
}
|
||||
|
||||
if change.Action == repository.FileActionDeleted && safepath.IsDir(change.Path) && progress.HasDirPathFailedDeletion(change.Path) {
|
||||
skipCtx, skipSpan := tracer.Start(ctx, "provisioning.sync.full.apply_changes.skip_folder_with_failed_deletions")
|
||||
skipSpan.SetAttributes(attribute.String("path", change.Path))
|
||||
progress.Record(skipCtx, jobs.JobResourceResult{
|
||||
Path: change.Path,
|
||||
Action: repository.FileActionIgnored,
|
||||
Group: resources.FolderKind.Group,
|
||||
Kind: resources.FolderKind.Kind,
|
||||
Warning: fmt.Errorf("folder was not processed because children resources in its path could not be deleted"),
|
||||
})
|
||||
skipSpan.End()
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func applyChange(ctx context.Context, change ResourceFileChange, clients resources.ResourceClients, repositoryResources resources.RepositoryResources, progress jobs.JobProgressRecorder, tracer tracing.Tracer) {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if shouldSkipChange(ctx, change, progress, tracer) {
|
||||
return
|
||||
}
|
||||
|
||||
if change.Action == repository.FileActionDeleted {
|
||||
deleteCtx, deleteSpan := tracer.Start(ctx, "provisioning.sync.full.apply_changes.delete")
|
||||
result := jobs.JobResourceResult{
|
||||
@@ -138,6 +174,7 @@ func applyChange(ctx context.Context, change ResourceFileChange, clients resourc
|
||||
ensureFolderSpan.RecordError(err)
|
||||
ensureFolderSpan.End()
|
||||
progress.Record(ctx, result)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -253,8 +290,6 @@ func applyChanges(ctx context.Context, changes []ResourceFileChange, clients res
|
||||
}
|
||||
|
||||
func applyFoldersSerially(ctx context.Context, folders []ResourceFileChange, clients resources.ResourceClients, repositoryResources resources.RepositoryResources, progress jobs.JobProgressRecorder, tracer tracing.Tracer) error {
|
||||
logger := logging.FromContext(ctx)
|
||||
|
||||
for _, folder := range folders {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
@@ -264,23 +299,9 @@ func applyFoldersSerially(ctx context.Context, folders []ResourceFileChange, cli
|
||||
return err
|
||||
}
|
||||
|
||||
folderCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
|
||||
applyChange(folderCtx, folder, clients, repositoryResources, progress, tracer)
|
||||
|
||||
if folderCtx.Err() == context.DeadlineExceeded {
|
||||
logger.Error("operation timed out after 15 seconds", "path", folder.Path, "action", folder.Action)
|
||||
|
||||
recordCtx, recordCancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
progress.Record(recordCtx, jobs.JobResourceResult{
|
||||
Path: folder.Path,
|
||||
Action: folder.Action,
|
||||
Error: fmt.Errorf("operation timed out after 15 seconds"),
|
||||
})
|
||||
recordCancel()
|
||||
}
|
||||
|
||||
cancel()
|
||||
wrapWithTimeout(ctx, 15*time.Second, func(timeoutCtx context.Context) {
|
||||
applyChange(timeoutCtx, folder, clients, repositoryResources, progress, tracer)
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -318,7 +339,9 @@ loop:
|
||||
defer wg.Done()
|
||||
defer func() { <-sem }()
|
||||
|
||||
applyChangeWithTimeout(ctx, change, clients, repositoryResources, progress, tracer, logger)
|
||||
wrapWithTimeout(ctx, 15*time.Second, func(timeoutCtx context.Context) {
|
||||
applyChange(timeoutCtx, change, clients, repositoryResources, progress, tracer)
|
||||
})
|
||||
}(change)
|
||||
}
|
||||
|
||||
@@ -331,21 +354,10 @@ loop:
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
func applyChangeWithTimeout(ctx context.Context, change ResourceFileChange, clients resources.ResourceClients, repositoryResources resources.RepositoryResources, progress jobs.JobProgressRecorder, tracer tracing.Tracer, logger logging.Logger) {
|
||||
changeCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
// wrapWithTimeout wraps a function call with a timeout context
|
||||
func wrapWithTimeout(ctx context.Context, timeout time.Duration, fn func(context.Context)) {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
applyChange(changeCtx, change, clients, repositoryResources, progress, tracer)
|
||||
|
||||
if changeCtx.Err() == context.DeadlineExceeded {
|
||||
logger.Error("operation timed out after 15 seconds", "path", change.Path, "action", change.Action)
|
||||
|
||||
recordCtx, recordCancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
progress.Record(recordCtx, jobs.JobResourceResult{
|
||||
Path: change.Path,
|
||||
Action: change.Action,
|
||||
Error: fmt.Errorf("operation timed out after 15 seconds"),
|
||||
})
|
||||
recordCancel()
|
||||
}
|
||||
fn(timeoutCtx)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,432 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
dynamicfake "k8s.io/client-go/dynamic/fake"
|
||||
k8testing "k8s.io/client-go/testing"
|
||||
|
||||
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/apps/provisioning/pkg/repository"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
|
||||
)
|
||||
|
||||
/*
|
||||
TestFullSync_HierarchicalErrorHandling tests the hierarchical error handling behavior:
|
||||
|
||||
FOLDER CREATION FAILURES:
|
||||
- When a folder fails to be created with PathCreationError, all nested resources are skipped
|
||||
- Nested resources are recorded with FileActionIgnored and error "folder was not processed because children resources in its path could not be deleted"
|
||||
- Only the folder creation error counts toward error limits
|
||||
- Nested resource skips do NOT count toward error limits
|
||||
|
||||
FOLDER DELETION FAILURES:
|
||||
- When a file deletion fails, it's tracked in failedDeletions
|
||||
- When cleaning up folders, we check HasDirPathFailedDeletion()
|
||||
- If children failed to delete, folder deletion is skipped with FileActionIgnored
|
||||
- This prevents orphaning resources that still exist
|
||||
|
||||
DELETIONS NOT AFFECTED BY CREATION FAILURES:
|
||||
- If a folder creation fails, deletion operations for resources in that folder still proceed
|
||||
- This is because the resource might already exist from a previous sync
|
||||
- Only creations/updates/renames are affected by failed folder creation
|
||||
|
||||
AUTOMATIC TRACKING:
|
||||
- Record() automatically detects PathCreationError and adds to failedCreations
|
||||
- Record() automatically detects deletion failures and adds to failedDeletions
|
||||
- No manual calls to AddFailedCreation/AddFailedDeletion needed
|
||||
*/
|
||||
func TestFullSync_HierarchicalErrorHandling(t *testing.T) { // nolint:gocyclo
|
||||
tests := []struct {
|
||||
name string
|
||||
setupMocks func(*repository.MockRepository, *resources.MockRepositoryResources, *resources.MockResourceClients, *jobs.MockJobProgressRecorder, *dynamicfake.FakeDynamicClient)
|
||||
changes []ResourceFileChange
|
||||
description string
|
||||
expectError bool
|
||||
errorContains string
|
||||
}{
|
||||
{
|
||||
name: "folder creation fails, nested file skipped",
|
||||
description: "When folder1/ fails to create, folder1/file.json should be skipped with FileActionIgnored",
|
||||
changes: []ResourceFileChange{
|
||||
{Path: "folder1/file.json", Action: repository.FileActionCreated},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, _ *dynamicfake.FakeDynamicClient) {
|
||||
// First, check if nested under failed creation - not yet
|
||||
progress.On("HasDirPathFailedCreation", "folder1/file.json").Return(false).Once()
|
||||
|
||||
// WriteResourceFromFile fails with PathCreationError for folder1/
|
||||
folderErr := &resources.PathCreationError{Path: "folder1/", Err: fmt.Errorf("permission denied")}
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "folder1/file.json", "").
|
||||
Return("", schema.GroupVersionKind{}, folderErr).Once()
|
||||
|
||||
// File will be recorded with error, triggering automatic tracking of folder1/ failure
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file.json" && r.Error != nil && r.Action == repository.FileActionCreated
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "folder creation fails, multiple nested resources skipped",
|
||||
description: "When folder1/ fails to create, all nested resources (subfolder, files) are skipped",
|
||||
changes: []ResourceFileChange{
|
||||
{Path: "folder1/file1.json", Action: repository.FileActionCreated},
|
||||
{Path: "folder1/subfolder/file2.json", Action: repository.FileActionCreated},
|
||||
{Path: "folder1/file3.json", Action: repository.FileActionCreated},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, _ *dynamicfake.FakeDynamicClient) {
|
||||
// First file triggers folder creation failure
|
||||
progress.On("HasDirPathFailedCreation", "folder1/file1.json").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "folder1/", Err: fmt.Errorf("permission denied")}
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "folder1/file1.json", "").
|
||||
Return("", schema.GroupVersionKind{}, folderErr).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file1.json" && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
// Subsequent files in same folder are skipped
|
||||
progress.On("HasDirPathFailedCreation", "folder1/subfolder/file2.json").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/subfolder/file2.json" &&
|
||||
r.Action == repository.FileActionIgnored &&
|
||||
r.Warning != nil &&
|
||||
r.Warning.Error() == "resource was not processed because the parent folder could not be created"
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "folder1/file3.json").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file3.json" &&
|
||||
r.Action == repository.FileActionIgnored &&
|
||||
r.Warning != nil &&
|
||||
r.Warning.Error() == "resource was not processed because the parent folder could not be created"
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "file deletion failure tracked",
|
||||
description: "When a file deletion fails, it's automatically tracked in failedDeletions",
|
||||
changes: []ResourceFileChange{
|
||||
{
|
||||
Path: "folder1/file.json",
|
||||
Action: repository.FileActionDeleted,
|
||||
Existing: &provisioning.ResourceListItem{
|
||||
Name: "file1",
|
||||
Group: "dashboard.grafana.app",
|
||||
Resource: "dashboards",
|
||||
},
|
||||
},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, dynamicClient *dynamicfake.FakeDynamicClient) {
|
||||
gvk := schema.GroupVersionKind{Group: "dashboard.grafana.app", Kind: "Dashboard", Version: "v1"}
|
||||
gvr := schema.GroupVersionResource{Group: "dashboard.grafana.app", Resource: "dashboards", Version: "v1"}
|
||||
|
||||
clients.On("ForResource", mock.Anything, mock.MatchedBy(func(gvr schema.GroupVersionResource) bool {
|
||||
return gvr.Group == "dashboard.grafana.app"
|
||||
})).Return(dynamicClient.Resource(gvr), gvk, nil)
|
||||
|
||||
// File deletion fails
|
||||
dynamicClient.PrependReactor("delete", "dashboards", func(action k8testing.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("permission denied")
|
||||
})
|
||||
|
||||
// File deletion recorded with error, automatically tracked in failedDeletions
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file.json" &&
|
||||
r.Action == repository.FileActionDeleted &&
|
||||
r.Error != nil
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deletion proceeds despite creation failure",
|
||||
description: "When folder1/ fails to create, deletion of folder1/file2.json still proceeds (resource might exist from previous sync)",
|
||||
changes: []ResourceFileChange{
|
||||
{Path: "folder1/file1.json", Action: repository.FileActionCreated},
|
||||
{
|
||||
Path: "folder1/file2.json",
|
||||
Action: repository.FileActionDeleted,
|
||||
Existing: &provisioning.ResourceListItem{
|
||||
Name: "file2",
|
||||
Group: "dashboard.grafana.app",
|
||||
Resource: "dashboards",
|
||||
},
|
||||
},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, dynamicClient *dynamicfake.FakeDynamicClient) {
|
||||
// Creation fails
|
||||
progress.On("HasDirPathFailedCreation", "folder1/file1.json").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "folder1/", Err: fmt.Errorf("permission denied")}
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "folder1/file1.json", "").
|
||||
Return("", schema.GroupVersionKind{}, folderErr).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file1.json" && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
// Deletion proceeds (NOT checking HasDirPathFailedCreation for deletions)
|
||||
// Note: deletion will fail because resource doesn't exist, but that's fine for this test
|
||||
gvk := schema.GroupVersionKind{Group: "dashboard.grafana.app", Kind: "Dashboard", Version: "v1"}
|
||||
gvr := schema.GroupVersionResource{Group: "dashboard.grafana.app", Resource: "dashboards", Version: "v1"}
|
||||
|
||||
clients.On("ForResource", mock.Anything, mock.MatchedBy(func(gvr schema.GroupVersionResource) bool {
|
||||
return gvr.Group == "dashboard.grafana.app"
|
||||
})).Return(dynamicClient.Resource(gvr), gvk, nil)
|
||||
|
||||
// Record deletion attempt (will have error since resource doesn't exist, but that's ok)
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file2.json" &&
|
||||
r.Action == repository.FileActionDeleted
|
||||
// Not checking r.Error because resource doesn't exist in fake client
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multi-level nesting - all skipped",
|
||||
description: "When level1/ fails, level1/level2/level3/file.json is also skipped",
|
||||
changes: []ResourceFileChange{
|
||||
{Path: "level1/file1.json", Action: repository.FileActionCreated},
|
||||
{Path: "level1/level2/file2.json", Action: repository.FileActionCreated},
|
||||
{Path: "level1/level2/level3/file3.json", Action: repository.FileActionCreated},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, _ *dynamicfake.FakeDynamicClient) {
|
||||
// First file triggers level1/ failure
|
||||
progress.On("HasDirPathFailedCreation", "level1/file1.json").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "level1/", Err: fmt.Errorf("permission denied")}
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "level1/file1.json", "").
|
||||
Return("", schema.GroupVersionKind{}, folderErr).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "level1/file1.json" && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
// All nested files are skipped
|
||||
for _, path := range []string{"level1/level2/file2.json", "level1/level2/level3/file3.json"} {
|
||||
progress.On("HasDirPathFailedCreation", path).Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == path && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed success and failure",
|
||||
description: "When success/ works and failure/ fails, only failure/* are skipped",
|
||||
changes: []ResourceFileChange{
|
||||
{Path: "success/file1.json", Action: repository.FileActionCreated},
|
||||
{Path: "failure/file2.json", Action: repository.FileActionCreated},
|
||||
{Path: "failure/nested/file3.json", Action: repository.FileActionCreated},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, _ *dynamicfake.FakeDynamicClient) {
|
||||
// Success path works
|
||||
progress.On("HasDirPathFailedCreation", "success/file1.json").Return(false).Once()
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "success/file1.json", "").
|
||||
Return("resource1", schema.GroupVersionKind{Kind: "Dashboard"}, nil).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "success/file1.json" && r.Error == nil
|
||||
})).Return().Once()
|
||||
|
||||
// Failure path fails
|
||||
progress.On("HasDirPathFailedCreation", "failure/file2.json").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "failure/", Err: fmt.Errorf("disk full")}
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "failure/file2.json", "").
|
||||
Return("", schema.GroupVersionKind{}, folderErr).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "failure/file2.json" && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
// Nested file in failure path is skipped
|
||||
progress.On("HasDirPathFailedCreation", "failure/nested/file3.json").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "failure/nested/file3.json" && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "folder creation fails with explicit folder in changes",
|
||||
description: "When folder1/ is explicitly in changes and fails to create, all nested resources (subfolders and files) are skipped",
|
||||
changes: []ResourceFileChange{
|
||||
{Path: "folder1/", Action: repository.FileActionCreated},
|
||||
{Path: "folder1/subfolder/", Action: repository.FileActionCreated},
|
||||
{Path: "folder1/file1.json", Action: repository.FileActionCreated},
|
||||
{Path: "folder1/subfolder/file2.json", Action: repository.FileActionCreated},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, _ *dynamicfake.FakeDynamicClient) {
|
||||
progress.On("HasDirPathFailedCreation", "folder1/").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "folder1/", Err: fmt.Errorf("permission denied")}
|
||||
repoResources.On("EnsureFolderPathExist", mock.Anything, "folder1/").Return("", folderErr).Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "folder1/subfolder/").Return(true).Once()
|
||||
progress.On("HasDirPathFailedCreation", "folder1/file1.json").Return(true).Once()
|
||||
progress.On("HasDirPathFailedCreation", "folder1/subfolder/file2.json").Return(true).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/" && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/subfolder/" && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file1.json" && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/subfolder/file2.json" && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "folder deletion prevented when child deletion fails",
|
||||
description: "When a file deletion fails, folder deletion is skipped with FileActionIgnored to prevent orphaning resources",
|
||||
changes: []ResourceFileChange{
|
||||
{
|
||||
Path: "folder1/file1.json",
|
||||
Action: repository.FileActionDeleted,
|
||||
Existing: &provisioning.ResourceListItem{Name: "file1", Group: "dashboard.grafana.app", Resource: "dashboards"},
|
||||
},
|
||||
{Path: "folder1/", Action: repository.FileActionDeleted, Existing: &provisioning.ResourceListItem{Name: "folder1", Group: "folder.grafana.app", Resource: "Folder"}},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, dynamicClient *dynamicfake.FakeDynamicClient) {
|
||||
gvk := schema.GroupVersionKind{Group: "dashboard.grafana.app", Kind: "Dashboard", Version: "v1"}
|
||||
gvr := schema.GroupVersionResource{Group: "dashboard.grafana.app", Resource: "dashboards", Version: "v1"}
|
||||
|
||||
clients.On("ForResource", mock.Anything, mock.MatchedBy(func(gvr schema.GroupVersionResource) bool {
|
||||
return gvr.Group == "dashboard.grafana.app"
|
||||
})).Return(dynamicClient.Resource(gvr), gvk, nil)
|
||||
|
||||
dynamicClient.PrependReactor("delete", "dashboards", func(action k8testing.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("permission denied")
|
||||
})
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file1.json" && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedDeletion", "folder1/").Return(true).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/" && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple folder deletion failures",
|
||||
description: "When multiple independent folders have child deletion failures, all folder deletions are skipped",
|
||||
changes: []ResourceFileChange{
|
||||
{Path: "folder1/file1.json", Action: repository.FileActionDeleted, Existing: &provisioning.ResourceListItem{Name: "file1", Group: "dashboard.grafana.app", Resource: "dashboards"}},
|
||||
{Path: "folder1/", Action: repository.FileActionDeleted},
|
||||
{Path: "folder2/file2.json", Action: repository.FileActionDeleted, Existing: &provisioning.ResourceListItem{Name: "file2", Group: "dashboard.grafana.app", Resource: "dashboards"}},
|
||||
{Path: "folder2/", Action: repository.FileActionDeleted},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, dynamicClient *dynamicfake.FakeDynamicClient) {
|
||||
gvk := schema.GroupVersionKind{Group: "dashboard.grafana.app", Kind: "Dashboard", Version: "v1"}
|
||||
gvr := schema.GroupVersionResource{Group: "dashboard.grafana.app", Resource: "dashboards", Version: "v1"}
|
||||
clients.On("ForResource", mock.Anything, mock.MatchedBy(func(gvr schema.GroupVersionResource) bool {
|
||||
return gvr.Group == "dashboard.grafana.app"
|
||||
})).Return(dynamicClient.Resource(gvr), gvk, nil)
|
||||
|
||||
dynamicClient.PrependReactor("delete", "dashboards", func(action k8testing.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("permission denied")
|
||||
})
|
||||
|
||||
for _, path := range []string{"folder1/file1.json", "folder2/file2.json"} {
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == path && r.Error != nil
|
||||
})).Return().Once()
|
||||
}
|
||||
|
||||
progress.On("HasDirPathFailedDeletion", "folder1/").Return(true).Once()
|
||||
progress.On("HasDirPathFailedDeletion", "folder2/").Return(true).Once()
|
||||
|
||||
for _, path := range []string{"folder1/", "folder2/"} {
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == path && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nested subfolder deletion failure",
|
||||
description: "When a file deletion fails in a nested subfolder, both the subfolder and parent folder deletions are skipped",
|
||||
changes: []ResourceFileChange{
|
||||
{Path: "parent/subfolder/file.json", Action: repository.FileActionDeleted, Existing: &provisioning.ResourceListItem{Name: "file1", Group: "dashboard.grafana.app", Resource: "dashboards"}},
|
||||
{Path: "parent/subfolder/", Action: repository.FileActionDeleted, Existing: &provisioning.ResourceListItem{Name: "subfolder", Group: "folder.grafana.app", Resource: "Folder"}},
|
||||
{Path: "parent/", Action: repository.FileActionDeleted, Existing: &provisioning.ResourceListItem{Name: "parent", Group: "folder.grafana.app", Resource: "Folder"}},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, dynamicClient *dynamicfake.FakeDynamicClient) {
|
||||
gvk := schema.GroupVersionKind{Group: "dashboard.grafana.app", Kind: "Dashboard", Version: "v1"}
|
||||
gvr := schema.GroupVersionResource{Group: "dashboard.grafana.app", Resource: "dashboards", Version: "v1"}
|
||||
clients.On("ForResource", mock.Anything, mock.MatchedBy(func(gvr schema.GroupVersionResource) bool {
|
||||
return gvr.Group == "dashboard.grafana.app"
|
||||
})).Return(dynamicClient.Resource(gvr), gvk, nil)
|
||||
|
||||
dynamicClient.PrependReactor("delete", "dashboards", func(action k8testing.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, fmt.Errorf("permission denied")
|
||||
})
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "parent/subfolder/file.json" && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedDeletion", "parent/subfolder/").Return(true).Once()
|
||||
progress.On("HasDirPathFailedDeletion", "parent/").Return(true).Once()
|
||||
|
||||
for _, path := range []string{"parent/subfolder/", "parent/"} {
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == path && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme)
|
||||
|
||||
repo := repository.NewMockRepository(t)
|
||||
repoResources := resources.NewMockRepositoryResources(t)
|
||||
clients := resources.NewMockResourceClients(t)
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
compareFn := NewMockCompareFn(t)
|
||||
|
||||
repo.On("Config").Return(&provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test-repo"},
|
||||
Spec: provisioning.RepositorySpec{Title: "Test Repo"},
|
||||
})
|
||||
|
||||
tt.setupMocks(repo, repoResources, clients, progress, dynamicClient)
|
||||
|
||||
compareFn.On("Execute", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tt.changes, nil)
|
||||
progress.On("SetTotal", mock.Anything, len(tt.changes)).Return()
|
||||
progress.On("TooManyErrors").Return(nil).Maybe()
|
||||
|
||||
err := FullSync(context.Background(), repo, compareFn.Execute, clients, "ref", repoResources, progress, tracing.NewNoopTracerService(), 10, jobs.RegisterJobMetrics(prometheus.NewPedanticRegistry()))
|
||||
|
||||
if tt.expectError {
|
||||
require.Error(t, err)
|
||||
if tt.errorContains != "" {
|
||||
require.Contains(t, err.Error(), tt.errorContains)
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
progress.AssertExpectations(t)
|
||||
repoResources.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -213,6 +213,10 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
return nil
|
||||
})
|
||||
|
||||
progress.On("HasDirPathFailedCreation", mock.MatchedBy(func(path string) bool {
|
||||
return path == "dashboards/one.json" || path == "dashboards/two.json" || path == "dashboards/three.json"
|
||||
})).Return(false).Maybe()
|
||||
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, mock.MatchedBy(func(path string) bool {
|
||||
return path == "dashboards/one.json" || path == "dashboards/two.json" || path == "dashboards/three.json"
|
||||
}), "").Return("test-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil).Maybe()
|
||||
@@ -235,6 +239,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, compareFn *MockCompareFn) {
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
progress.On("HasDirPathFailedCreation", "dashboards/test.json").Return(false)
|
||||
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "dashboards/test.json", "").
|
||||
Return("test-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil)
|
||||
@@ -259,6 +264,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, compareFn *MockCompareFn) {
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
progress.On("HasDirPathFailedCreation", "dashboards/test.json").Return(false)
|
||||
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "dashboards/test.json", "").
|
||||
Return("test-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, fmt.Errorf("write error"))
|
||||
@@ -285,6 +291,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, compareFn *MockCompareFn) {
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
progress.On("HasDirPathFailedCreation", "dashboards/test.json").Return(false)
|
||||
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "dashboards/test.json", "").
|
||||
Return("test-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil)
|
||||
@@ -309,6 +316,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, compareFn *MockCompareFn) {
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
progress.On("HasDirPathFailedCreation", "dashboards/test.json").Return(false)
|
||||
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "dashboards/test.json", "").
|
||||
Return("test-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, fmt.Errorf("write error"))
|
||||
@@ -335,6 +343,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, compareFn *MockCompareFn) {
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
progress.On("HasDirPathFailedCreation", "one/two/three/").Return(false)
|
||||
|
||||
repoResources.On("EnsureFolderPathExist", mock.Anything, "one/two/three/").Return("some-folder", nil)
|
||||
progress.On("Record", mock.Anything, jobs.JobResourceResult{
|
||||
@@ -357,6 +366,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, compareFn *MockCompareFn) {
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
progress.On("HasDirPathFailedCreation", "one/two/three/").Return(false)
|
||||
|
||||
repoResources.On(
|
||||
"EnsureFolderPathExist",
|
||||
@@ -581,6 +591,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, compareFn *MockCompareFn) {
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
progress.On("HasDirPathFailedDeletion", "to-be-deleted/").Return(false)
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
require.NoError(t, metav1.AddMetaToScheme(scheme))
|
||||
@@ -640,6 +651,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, compareFn *MockCompareFn) {
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
progress.On("HasDirPathFailedDeletion", "to-be-deleted/").Return(false)
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
require.NoError(t, metav1.AddMetaToScheme(scheme))
|
||||
@@ -695,6 +707,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
},
|
||||
setupMocks: func(repo *repository.MockRepository, repoResources *resources.MockRepositoryResources, clients *resources.MockResourceClients, progress *jobs.MockJobProgressRecorder, compareFn *MockCompareFn) {
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
progress.On("HasDirPathFailedCreation", "dashboards/slow.json").Return(false)
|
||||
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "dashboards/slow.json", "").
|
||||
Run(func(args mock.Arguments) {
|
||||
@@ -708,19 +721,13 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo
|
||||
}).
|
||||
Return("", schema.GroupVersionKind{}, context.DeadlineExceeded)
|
||||
|
||||
// applyChange records the error from WriteResourceFromFile
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
|
||||
return result.Action == repository.FileActionCreated &&
|
||||
result.Path == "dashboards/slow.json" &&
|
||||
result.Error != nil &&
|
||||
result.Error.Error() == "writing resource from file dashboards/slow.json: context deadline exceeded"
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
|
||||
return result.Action == repository.FileActionCreated &&
|
||||
result.Path == "dashboards/slow.json" &&
|
||||
result.Error != nil &&
|
||||
result.Error.Error() == "operation timed out after 15 seconds"
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ func IncrementalSync(ctx context.Context, repo repository.Versioned, previousRef
|
||||
if len(affectedFolders) > 0 {
|
||||
cleanupStart := time.Now()
|
||||
span.AddEvent("checking if impacted folders should be deleted", trace.WithAttributes(attribute.Int("affected_folders", len(affectedFolders))))
|
||||
err := cleanupOrphanedFolders(ctx, repo, affectedFolders, repositoryResources, tracer)
|
||||
err := cleanupOrphanedFolders(ctx, repo, affectedFolders, repositoryResources, tracer, progress)
|
||||
metrics.RecordIncrementalSyncPhase(jobs.IncrementalSyncPhaseCleanup, time.Since(cleanupStart))
|
||||
if err != nil {
|
||||
return tracing.Error(span, fmt.Errorf("cleanup orphaned folders: %w", err))
|
||||
@@ -85,6 +85,20 @@ func applyIncrementalChanges(ctx context.Context, diff []repository.VersionedFil
|
||||
return nil, tracing.Error(span, err)
|
||||
}
|
||||
|
||||
// Check if this resource is nested under a failed folder creation
|
||||
// This only applies to creation/update/rename operations, not deletions
|
||||
if change.Action != repository.FileActionDeleted && progress.HasDirPathFailedCreation(change.Path) {
|
||||
// Skip this resource since its parent folder failed to be created
|
||||
skipCtx, skipSpan := tracer.Start(ctx, "provisioning.sync.incremental.skip_nested_resource")
|
||||
progress.Record(skipCtx, jobs.JobResourceResult{
|
||||
Path: change.Path,
|
||||
Action: repository.FileActionIgnored,
|
||||
Warning: fmt.Errorf("resource was not processed because the parent folder could not be created"),
|
||||
})
|
||||
skipSpan.End()
|
||||
continue
|
||||
}
|
||||
|
||||
if err := resources.IsPathSupported(change.Path); err != nil {
|
||||
ensureFolderCtx, ensureFolderSpan := tracer.Start(ctx, "provisioning.sync.incremental.ensure_folder_path_exist")
|
||||
// Maintain the safe segment for empty folders
|
||||
@@ -98,7 +112,15 @@ func applyIncrementalChanges(ctx context.Context, diff []repository.VersionedFil
|
||||
if err != nil {
|
||||
ensureFolderSpan.RecordError(err)
|
||||
ensureFolderSpan.End()
|
||||
return nil, tracing.Error(span, fmt.Errorf("unable to create empty file folder: %w", err))
|
||||
|
||||
progress.Record(ensureFolderCtx, jobs.JobResourceResult{
|
||||
Path: change.Path,
|
||||
Action: repository.FileActionIgnored,
|
||||
Group: resources.FolderKind.Group,
|
||||
Kind: resources.FolderKind.Kind,
|
||||
Error: err,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
progress.Record(ensureFolderCtx, jobs.JobResourceResult{
|
||||
@@ -185,6 +207,7 @@ func cleanupOrphanedFolders(
|
||||
affectedFolders map[string]string,
|
||||
repositoryResources resources.RepositoryResources,
|
||||
tracer tracing.Tracer,
|
||||
progress jobs.JobProgressRecorder,
|
||||
) error {
|
||||
ctx, span := tracer.Start(ctx, "provisioning.sync.incremental.cleanup_orphaned_folders")
|
||||
defer span.End()
|
||||
@@ -198,6 +221,12 @@ func cleanupOrphanedFolders(
|
||||
for path, folderName := range affectedFolders {
|
||||
span.SetAttributes(attribute.String("folder", folderName))
|
||||
|
||||
// Check if any resources under this folder failed to delete
|
||||
if progress.HasDirPathFailedDeletion(path) {
|
||||
span.AddEvent("skipping orphaned folder cleanup: a child resource in its path failed to be deleted")
|
||||
continue
|
||||
}
|
||||
|
||||
// if we can no longer find the folder in git, then we can delete it from grafana
|
||||
_, err := readerRepo.Read(ctx, path, "")
|
||||
if err != nil && (errors.Is(err, repository.ErrFileNotFound) || apierrors.IsNotFound(err)) {
|
||||
|
||||
@@ -0,0 +1,623 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/grafana/grafana/apps/provisioning/pkg/repository"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
|
||||
)
|
||||
|
||||
/*
|
||||
TestIncrementalSync_HierarchicalErrorHandling tests the hierarchical error handling behavior:
|
||||
|
||||
FOLDER CREATION FAILURES:
|
||||
- When EnsureFolderPathExist fails with PathCreationError, the path is tracked
|
||||
- Subsequent resources under that path are skipped with FileActionIgnored
|
||||
- Only the initial folder creation error counts toward error limits
|
||||
- WriteResourceFromFile can also return PathCreationError for implicit folder creation
|
||||
|
||||
FOLDER DELETION FAILURES (cleanupOrphanedFolders):
|
||||
- When RemoveResourceFromFile fails, path is tracked in failedDeletions
|
||||
- In cleanupOrphanedFolders, HasDirPathFailedDeletion() is checked before RemoveFolder
|
||||
- If children failed to delete, folder cleanup is skipped with a span event
|
||||
|
||||
DELETIONS NOT AFFECTED BY CREATION FAILURES:
|
||||
- HasDirPathFailedCreation is NOT checked for FileActionDeleted
|
||||
- Deletions proceed even if their parent folder failed to be created
|
||||
- This handles cleanup of resources that exist from previous syncs
|
||||
|
||||
RENAME OPERATIONS:
|
||||
- RenameResourceFile can return PathCreationError for the destination folder
|
||||
- Renames are affected by failed destination folder creation
|
||||
- Renames are NOT skipped due to source folder creation failures
|
||||
|
||||
AUTOMATIC TRACKING:
|
||||
- Record() automatically detects PathCreationError via errors.As() and adds to failedCreations
|
||||
- Record() automatically detects FileActionDeleted with error and adds to failedDeletions
|
||||
- No manual tracking calls needed
|
||||
*/
|
||||
func TestIncrementalSync_HierarchicalErrorHandling(t *testing.T) { // nolint:gocyclo
|
||||
tests := []struct {
|
||||
name string
|
||||
setupMocks func(*repository.MockVersioned, *resources.MockRepositoryResources, *jobs.MockJobProgressRecorder)
|
||||
changes []repository.VersionedFileChange
|
||||
previousRef string
|
||||
currentRef string
|
||||
description string
|
||||
expectError bool
|
||||
errorContains string
|
||||
}{
|
||||
{
|
||||
name: "folder creation fails, nested file skipped",
|
||||
description: "When unsupported/ fails to create via EnsureFolderPathExist, nested file is skipped",
|
||||
previousRef: "old-ref",
|
||||
currentRef: "new-ref",
|
||||
changes: []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionCreated, Path: "unsupported/file.txt", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "unsupported/nested/file2.txt", Ref: "new-ref"},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockVersioned, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) {
|
||||
// First file triggers folder creation which fails
|
||||
progress.On("HasDirPathFailedCreation", "unsupported/file.txt").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "unsupported/", Err: fmt.Errorf("permission denied")}
|
||||
repoResources.On("EnsureFolderPathExist", mock.Anything, "unsupported/").Return("", folderErr).Once()
|
||||
|
||||
// First file recorded with error (note: error is from folder creation, but recorded against file)
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "unsupported/file.txt" &&
|
||||
r.Action == repository.FileActionIgnored &&
|
||||
r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
// Second file is skipped because parent folder failed
|
||||
progress.On("HasDirPathFailedCreation", "unsupported/nested/file2.txt").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "unsupported/nested/file2.txt" &&
|
||||
r.Action == repository.FileActionIgnored &&
|
||||
r.Warning != nil &&
|
||||
r.Warning.Error() == "resource was not processed because the parent folder could not be created"
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "WriteResourceFromFile returns PathCreationError, nested resources skipped",
|
||||
description: "When WriteResourceFromFile implicitly creates a folder and fails, nested resources are skipped",
|
||||
previousRef: "old-ref",
|
||||
currentRef: "new-ref",
|
||||
changes: []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionCreated, Path: "folder1/file1.json", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "folder1/file2.json", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "folder1/nested/file3.json", Ref: "new-ref"},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockVersioned, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) {
|
||||
// First file write fails with PathCreationError
|
||||
progress.On("HasDirPathFailedCreation", "folder1/file1.json").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "folder1/", Err: fmt.Errorf("permission denied")}
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "folder1/file1.json", "new-ref").
|
||||
Return("", schema.GroupVersionKind{}, folderErr).Once()
|
||||
|
||||
// First file recorded with error, automatically tracked
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file1.json" &&
|
||||
r.Action == repository.FileActionCreated &&
|
||||
r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
// Subsequent files are skipped
|
||||
progress.On("HasDirPathFailedCreation", "folder1/file2.json").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file2.json" && r.Action == repository.FileActionIgnored && r.Warning != nil
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "folder1/nested/file3.json").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/nested/file3.json" && r.Action == repository.FileActionIgnored && r.Warning != nil
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "file deletion fails, folder cleanup skipped",
|
||||
description: "When RemoveResourceFromFile fails, cleanupOrphanedFolders skips folder removal",
|
||||
previousRef: "old-ref",
|
||||
currentRef: "new-ref",
|
||||
changes: []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionDeleted, Path: "dashboards/file1.json", PreviousRef: "old-ref"},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockVersioned, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) {
|
||||
// File deletion fails (deletions don't check HasDirPathFailedCreation)
|
||||
repoResources.On("RemoveResourceFromFile", mock.Anything, "dashboards/file1.json", "old-ref").
|
||||
Return("dashboard-1", "folder-uid", schema.GroupVersionKind{Kind: "Dashboard"}, fmt.Errorf("permission denied")).Once()
|
||||
|
||||
// Error recorded, automatically tracked in failedDeletions
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "dashboards/file1.json" &&
|
||||
r.Action == repository.FileActionDeleted &&
|
||||
r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
// During cleanup, folder deletion is skipped
|
||||
progress.On("HasDirPathFailedDeletion", "dashboards/").Return(true).Once()
|
||||
|
||||
// Note: RemoveFolder should NOT be called (verified via AssertNotCalled in test)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deletion proceeds despite creation failure",
|
||||
description: "When folder1/ creation fails, deletion of folder1/old.json still proceeds",
|
||||
previousRef: "old-ref",
|
||||
currentRef: "new-ref",
|
||||
changes: []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionCreated, Path: "folder1/new.json", Ref: "new-ref"},
|
||||
{Action: repository.FileActionDeleted, Path: "folder1/old.json", PreviousRef: "old-ref"},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockVersioned, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) {
|
||||
// Creation fails
|
||||
progress.On("HasDirPathFailedCreation", "folder1/new.json").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "folder1/", Err: fmt.Errorf("permission denied")}
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "folder1/new.json", "new-ref").
|
||||
Return("", schema.GroupVersionKind{}, folderErr).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/new.json" && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
// Deletion proceeds (NOT checking HasDirPathFailedCreation for deletions)
|
||||
repoResources.On("RemoveResourceFromFile", mock.Anything, "folder1/old.json", "old-ref").
|
||||
Return("old-resource", "", schema.GroupVersionKind{Kind: "Dashboard"}, nil).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/old.json" &&
|
||||
r.Action == repository.FileActionDeleted &&
|
||||
r.Error == nil // Deletion succeeds!
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multi-level nesting cascade",
|
||||
description: "When level1/ fails, level1/level2/level3/file.json is also skipped",
|
||||
previousRef: "old-ref",
|
||||
currentRef: "new-ref",
|
||||
changes: []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionCreated, Path: "level1/file.txt", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "level1/level2/file.txt", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "level1/level2/level3/file.txt", Ref: "new-ref"},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockVersioned, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) {
|
||||
// First file triggers level1/ failure
|
||||
progress.On("HasDirPathFailedCreation", "level1/file.txt").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "level1/", Err: fmt.Errorf("permission denied")}
|
||||
repoResources.On("EnsureFolderPathExist", mock.Anything, "level1/").Return("", folderErr).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "level1/file.txt" && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
|
||||
// All nested files are skipped
|
||||
for _, path := range []string{"level1/level2/file.txt", "level1/level2/level3/file.txt"} {
|
||||
progress.On("HasDirPathFailedCreation", path).Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == path && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed success and failure",
|
||||
description: "When success/ works and failure/ fails, only failure/* are skipped",
|
||||
previousRef: "old-ref",
|
||||
currentRef: "new-ref",
|
||||
changes: []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionCreated, Path: "success/file1.json", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "success/nested/file2.json", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "failure/file3.txt", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "failure/nested/file4.txt", Ref: "new-ref"},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockVersioned, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) {
|
||||
// Success path works
|
||||
progress.On("HasDirPathFailedCreation", "success/file1.json").Return(false).Once()
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "success/file1.json", "new-ref").
|
||||
Return("resource-1", schema.GroupVersionKind{Kind: "Dashboard"}, nil).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "success/file1.json" && r.Error == nil
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "success/nested/file2.json").Return(false).Once()
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "success/nested/file2.json", "new-ref").
|
||||
Return("resource-2", schema.GroupVersionKind{Kind: "Dashboard"}, nil).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "success/nested/file2.json" && r.Error == nil
|
||||
})).Return().Once()
|
||||
|
||||
// Failure path fails
|
||||
progress.On("HasDirPathFailedCreation", "failure/file3.txt").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "failure/", Err: fmt.Errorf("disk full")}
|
||||
repoResources.On("EnsureFolderPathExist", mock.Anything, "failure/").Return("", folderErr).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "failure/file3.txt" && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
|
||||
// Nested file in failure path is skipped
|
||||
progress.On("HasDirPathFailedCreation", "failure/nested/file4.txt").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "failure/nested/file4.txt" && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "rename with failed destination folder",
|
||||
description: "When RenameResourceFile fails with PathCreationError for destination, rename is skipped",
|
||||
previousRef: "old-ref",
|
||||
currentRef: "new-ref",
|
||||
changes: []repository.VersionedFileChange{
|
||||
{
|
||||
Action: repository.FileActionRenamed,
|
||||
Path: "newfolder/file.json",
|
||||
PreviousPath: "oldfolder/file.json",
|
||||
Ref: "new-ref",
|
||||
PreviousRef: "old-ref",
|
||||
},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockVersioned, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) {
|
||||
// Rename fails with PathCreationError for destination folder
|
||||
progress.On("HasDirPathFailedCreation", "newfolder/file.json").Return(false).Once()
|
||||
folderErr := &resources.PathCreationError{Path: "newfolder/", Err: fmt.Errorf("permission denied")}
|
||||
repoResources.On("RenameResourceFile", mock.Anything, "oldfolder/file.json", "old-ref", "newfolder/file.json", "new-ref").
|
||||
Return("", "", schema.GroupVersionKind{}, folderErr).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "newfolder/file.json" &&
|
||||
r.Action == repository.FileActionRenamed &&
|
||||
r.Error != nil
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "renamed file still checked, subsequent nested resources skipped",
|
||||
description: "After rename fails for folder1/file.json, other folder1/* files are skipped",
|
||||
previousRef: "old-ref",
|
||||
currentRef: "new-ref",
|
||||
changes: []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionRenamed, Path: "folder1/file1.json", PreviousPath: "old/file1.json", Ref: "new-ref", PreviousRef: "old-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "folder1/file2.json", Ref: "new-ref"},
|
||||
},
|
||||
setupMocks: func(repo *repository.MockVersioned, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) {
|
||||
// Rename is NOT skipped for creation failures (it's checking the destination path)
|
||||
progress.On("HasDirPathFailedCreation", "folder1/file1.json").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file1.json" &&
|
||||
r.Action == repository.FileActionIgnored &&
|
||||
r.Warning != nil && r.Warning.Error() == "resource was not processed because the parent folder could not be created"
|
||||
})).Return().Once()
|
||||
|
||||
// Second file also skipped
|
||||
progress.On("HasDirPathFailedCreation", "folder1/file2.json").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file2.json" && r.Action == repository.FileActionIgnored && r.Warning != nil
|
||||
})).Return().Once()
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
runHierarchicalErrorHandlingTest(t, tt)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// compositeRepoForTest embeds both a MockVersioned and a MockReader so a single
// value satisfies the repository.Versioned and repository.Reader interfaces.
// Tests that exercise orphaned-folder cleanup need the Reader side (the sync
// calls Read to check whether a folder still exists in git) in addition to the
// Versioned side used for CompareFiles.
type compositeRepoForTest struct {
	*repository.MockVersioned
	*repository.MockReader
}
|
||||
|
||||
// runHierarchicalErrorHandlingTest executes one table case for the hierarchical
// error-handling tests: it wires up the standard CompareFiles/progress mock
// expectations shared by every case, lets the case install its own mocks via
// tt.setupMocks, runs IncrementalSync, and checks the expected error outcome.
//
// NOTE(review): the cleanup-specific behavior is keyed off the literal case
// name "file deletion fails, folder cleanup skipped" — the string here must
// stay in sync with the table entry.
func runHierarchicalErrorHandlingTest(t *testing.T, tt struct {
	name          string
	setupMocks    func(*repository.MockVersioned, *resources.MockRepositoryResources, *jobs.MockJobProgressRecorder)
	changes       []repository.VersionedFileChange
	previousRef   string
	currentRef    string
	description   string
	expectError   bool
	errorContains string
}) {
	var repo repository.Versioned
	mockVersioned := repository.NewMockVersioned(t)
	repoResources := resources.NewMockRepositoryResources(t)
	progress := jobs.NewMockJobProgressRecorder(t)

	// For tests that need cleanup (folder deletion), use composite repo so the
	// sync can also Read folder paths from the repository.
	if tt.name == "file deletion fails, folder cleanup skipped" {
		mockReader := repository.NewMockReader(t)
		repo = &compositeRepoForTest{
			MockVersioned: mockVersioned,
			MockReader:    mockReader,
		}
	} else {
		repo = mockVersioned
	}

	// Expectations common to every table case.
	mockVersioned.On("CompareFiles", mock.Anything, tt.previousRef, tt.currentRef).Return(tt.changes, nil)
	progress.On("SetTotal", mock.Anything, len(tt.changes)).Return()
	progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
	progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
	progress.On("TooManyErrors").Return(nil).Maybe()

	// Case-specific expectations.
	tt.setupMocks(mockVersioned, repoResources, progress)

	err := IncrementalSync(context.Background(), repo, tt.previousRef, tt.currentRef, repoResources, progress, tracing.NewNoopTracerService(), jobs.RegisterJobMetrics(prometheus.NewPedanticRegistry()))

	if tt.expectError {
		require.Error(t, err)
		if tt.errorContains != "" {
			require.Contains(t, err.Error(), tt.errorContains)
		}
	} else {
		require.NoError(t, err)
	}

	progress.AssertExpectations(t)
	repoResources.AssertExpectations(t)
	// For deletion tests, verify RemoveFolder was NOT called
	if tt.name == "file deletion fails, folder cleanup skipped" {
		repoResources.AssertNotCalled(t, "RemoveFolder", mock.Anything, mock.Anything)
	}
}
|
||||
|
||||
// TestIncrementalSync_HierarchicalErrorHandling_FailedFolderCreation tests nested resource skipping
|
||||
func TestIncrementalSync_HierarchicalErrorHandling_FailedFolderCreation(t *testing.T) {
|
||||
repo := repository.NewMockVersioned(t)
|
||||
repoResources := resources.NewMockRepositoryResources(t)
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
|
||||
changes := []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionCreated, Path: "unsupported/file.txt", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "unsupported/subfolder/file2.txt", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "unsupported/file3.json", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "other/file.json", Ref: "new-ref"},
|
||||
}
|
||||
|
||||
repo.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil)
|
||||
progress.On("SetTotal", mock.Anything, 4).Return()
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
progress.On("TooManyErrors").Return(nil).Maybe()
|
||||
|
||||
folderErr := &resources.PathCreationError{Path: "unsupported/", Err: fmt.Errorf("permission denied")}
|
||||
// First check is before it fails.
|
||||
progress.On("HasDirPathFailedCreation", "unsupported/file.txt").Return(false).Once()
|
||||
repoResources.On("EnsureFolderPathExist", mock.Anything, "unsupported/").Return("", folderErr).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "unsupported/file.txt" && r.Action == repository.FileActionIgnored && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "unsupported/subfolder/file2.txt").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "unsupported/subfolder/file2.txt" && r.Action == repository.FileActionIgnored &&
|
||||
r.Warning != nil && r.Warning.Error() == "resource was not processed because the parent folder could not be created"
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "unsupported/file3.json").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "unsupported/file3.json" && r.Action == repository.FileActionIgnored &&
|
||||
r.Warning != nil && r.Warning.Error() == "resource was not processed because the parent folder could not be created"
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "other/file.json").Return(false).Once()
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "other/file.json", "new-ref").
|
||||
Return("test-resource", schema.GroupVersionKind{Kind: "Dashboard"}, nil).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "other/file.json" && r.Action == repository.FileActionCreated && r.Error == nil
|
||||
})).Return().Once()
|
||||
|
||||
err := IncrementalSync(context.Background(), repo, "old-ref", "new-ref", repoResources, progress, tracing.NewNoopTracerService(), jobs.RegisterJobMetrics(prometheus.NewPedanticRegistry()))
|
||||
require.NoError(t, err)
|
||||
progress.AssertExpectations(t)
|
||||
}
|
||||
|
||||
// TestIncrementalSync_HierarchicalErrorHandling_FailedFileDeletion tests folder cleanup prevention
|
||||
func TestIncrementalSync_HierarchicalErrorHandling_FailedFileDeletion(t *testing.T) {
|
||||
mockVersioned := repository.NewMockVersioned(t)
|
||||
mockReader := repository.NewMockReader(t)
|
||||
repo := &compositeRepoForTest{MockVersioned: mockVersioned, MockReader: mockReader}
|
||||
repoResources := resources.NewMockRepositoryResources(t)
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
|
||||
changes := []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionDeleted, Path: "dashboards/file1.json", PreviousRef: "old-ref"},
|
||||
}
|
||||
|
||||
mockVersioned.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil)
|
||||
progress.On("SetTotal", mock.Anything, 1).Return()
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
progress.On("TooManyErrors").Return(nil).Maybe()
|
||||
|
||||
// Deletions don't check HasDirPathFailedCreation, they go straight to removal
|
||||
repoResources.On("RemoveResourceFromFile", mock.Anything, "dashboards/file1.json", "old-ref").
|
||||
Return("dashboard-1", "folder-uid", schema.GroupVersionKind{Kind: "Dashboard"}, fmt.Errorf("permission denied")).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "dashboards/file1.json" && r.Action == repository.FileActionDeleted &&
|
||||
r.Error != nil && r.Error.Error() == "removing resource from file dashboards/file1.json: permission denied"
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedDeletion", "dashboards/").Return(true).Once()
|
||||
|
||||
err := IncrementalSync(context.Background(), repo, "old-ref", "new-ref", repoResources, progress, tracing.NewNoopTracerService(), jobs.RegisterJobMetrics(prometheus.NewPedanticRegistry()))
|
||||
require.NoError(t, err)
|
||||
progress.AssertExpectations(t)
|
||||
repoResources.AssertNotCalled(t, "RemoveFolder", mock.Anything, mock.Anything)
|
||||
}
|
||||
|
||||
// TestIncrementalSync_HierarchicalErrorHandling_DeletionNotAffectedByCreationFailure tests deletions proceed despite creation failures
|
||||
func TestIncrementalSync_HierarchicalErrorHandling_DeletionNotAffectedByCreationFailure(t *testing.T) {
|
||||
repo := repository.NewMockVersioned(t)
|
||||
repoResources := resources.NewMockRepositoryResources(t)
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
|
||||
changes := []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionCreated, Path: "folder1/file.json", Ref: "new-ref"},
|
||||
{Action: repository.FileActionDeleted, Path: "folder1/old.json", PreviousRef: "old-ref"},
|
||||
}
|
||||
|
||||
repo.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil)
|
||||
progress.On("SetTotal", mock.Anything, 2).Return()
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
progress.On("TooManyErrors").Return(nil).Maybe()
|
||||
|
||||
// Creation fails
|
||||
progress.On("HasDirPathFailedCreation", "folder1/file.json").Return(false).Once()
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "folder1/file.json", "new-ref").
|
||||
Return("", schema.GroupVersionKind{}, &resources.PathCreationError{Path: "folder1/", Err: fmt.Errorf("permission denied")}).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/file.json" && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
// Deletion should NOT be skipped (not checking HasDirPathFailedCreation for deletions)
|
||||
// Deletions don't check HasDirPathFailedCreation, they go straight to removal
|
||||
repoResources.On("RemoveResourceFromFile", mock.Anything, "folder1/old.json", "old-ref").
|
||||
Return("old-resource", "", schema.GroupVersionKind{Kind: "Dashboard"}, nil).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "folder1/old.json" && r.Action == repository.FileActionDeleted && r.Error == nil
|
||||
})).Return().Once()
|
||||
|
||||
err := IncrementalSync(context.Background(), repo, "old-ref", "new-ref", repoResources, progress, tracing.NewNoopTracerService(), jobs.RegisterJobMetrics(prometheus.NewPedanticRegistry()))
|
||||
require.NoError(t, err)
|
||||
progress.AssertExpectations(t)
|
||||
}
|
||||
|
||||
// TestIncrementalSync_HierarchicalErrorHandling_MultiLevelNesting tests multi-level cascade
|
||||
func TestIncrementalSync_HierarchicalErrorHandling_MultiLevelNesting(t *testing.T) {
|
||||
repo := repository.NewMockVersioned(t)
|
||||
repoResources := resources.NewMockRepositoryResources(t)
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
|
||||
changes := []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionCreated, Path: "level1/file.txt", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "level1/level2/file.txt", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "level1/level2/level3/file.txt", Ref: "new-ref"},
|
||||
}
|
||||
|
||||
repo.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil)
|
||||
progress.On("SetTotal", mock.Anything, 3).Return()
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
progress.On("TooManyErrors").Return(nil).Maybe()
|
||||
|
||||
folderErr := &resources.PathCreationError{Path: "level1/", Err: fmt.Errorf("permission denied")}
|
||||
// First check is before it fails.
|
||||
progress.On("HasDirPathFailedCreation", "level1/file.txt").Return(false).Once()
|
||||
repoResources.On("EnsureFolderPathExist", mock.Anything, "level1/").Return("", folderErr).Once()
|
||||
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "level1/file.txt" && r.Action == repository.FileActionIgnored && r.Error != nil
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "level1/level2/file.txt").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "level1/level2/file.txt" && r.Action == repository.FileActionIgnored &&
|
||||
r.Warning != nil && r.Warning.Error() == "resource was not processed because the parent folder could not be created"
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "level1/level2/level3/file.txt").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "level1/level2/level3/file.txt" && r.Action == repository.FileActionIgnored &&
|
||||
r.Warning != nil && r.Warning.Error() == "resource was not processed because the parent folder could not be created"
|
||||
})).Return().Once()
|
||||
|
||||
err := IncrementalSync(context.Background(), repo, "old-ref", "new-ref", repoResources, progress, tracing.NewNoopTracerService(), jobs.RegisterJobMetrics(prometheus.NewPedanticRegistry()))
|
||||
require.NoError(t, err)
|
||||
progress.AssertExpectations(t)
|
||||
}
|
||||
|
||||
// TestIncrementalSync_HierarchicalErrorHandling_MixedSuccessAndFailure tests partial failures
|
||||
func TestIncrementalSync_HierarchicalErrorHandling_MixedSuccessAndFailure(t *testing.T) {
|
||||
repo := repository.NewMockVersioned(t)
|
||||
repoResources := resources.NewMockRepositoryResources(t)
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
|
||||
changes := []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionCreated, Path: "success/file1.json", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "success/nested/file2.json", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "failure/file3.txt", Ref: "new-ref"},
|
||||
{Action: repository.FileActionCreated, Path: "failure/nested/file4.txt", Ref: "new-ref"},
|
||||
}
|
||||
|
||||
repo.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil)
|
||||
progress.On("SetTotal", mock.Anything, 4).Return()
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
progress.On("TooManyErrors").Return(nil).Maybe()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "success/file1.json").Return(false).Once()
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "success/file1.json", "new-ref").
|
||||
Return("resource-1", schema.GroupVersionKind{Kind: "Dashboard"}, nil).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "success/file1.json" && r.Action == repository.FileActionCreated && r.Error == nil
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "success/nested/file2.json").Return(false).Once()
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "success/nested/file2.json", "new-ref").
|
||||
Return("resource-2", schema.GroupVersionKind{Kind: "Dashboard"}, nil).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "success/nested/file2.json" && r.Action == repository.FileActionCreated && r.Error == nil
|
||||
})).Return().Once()
|
||||
|
||||
folderErr := &resources.PathCreationError{Path: "failure/", Err: fmt.Errorf("disk full")}
|
||||
progress.On("HasDirPathFailedCreation", "failure/file3.txt").Return(false).Once()
|
||||
repoResources.On("EnsureFolderPathExist", mock.Anything, "failure/").Return("", folderErr).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "failure/file3.txt" && r.Action == repository.FileActionIgnored
|
||||
})).Return().Once()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "failure/nested/file4.txt").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "failure/nested/file4.txt" && r.Action == repository.FileActionIgnored &&
|
||||
r.Warning != nil && r.Warning.Error() == "resource was not processed because the parent folder could not be created"
|
||||
})).Return().Once()
|
||||
|
||||
err := IncrementalSync(context.Background(), repo, "old-ref", "new-ref", repoResources, progress, tracing.NewNoopTracerService(), jobs.RegisterJobMetrics(prometheus.NewPedanticRegistry()))
|
||||
require.NoError(t, err)
|
||||
progress.AssertExpectations(t)
|
||||
repoResources.AssertExpectations(t)
|
||||
}
|
||||
|
||||
// TestIncrementalSync_HierarchicalErrorHandling_RenameWithFailedFolderCreation tests rename operations affected by folder failures
|
||||
func TestIncrementalSync_HierarchicalErrorHandling_RenameWithFailedFolderCreation(t *testing.T) {
|
||||
repo := repository.NewMockVersioned(t)
|
||||
repoResources := resources.NewMockRepositoryResources(t)
|
||||
progress := jobs.NewMockJobProgressRecorder(t)
|
||||
|
||||
changes := []repository.VersionedFileChange{
|
||||
{Action: repository.FileActionRenamed, Path: "newfolder/file.json", PreviousPath: "oldfolder/file.json", Ref: "new-ref", PreviousRef: "old-ref"},
|
||||
}
|
||||
|
||||
repo.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil)
|
||||
progress.On("SetTotal", mock.Anything, 1).Return()
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
progress.On("TooManyErrors").Return(nil).Maybe()
|
||||
|
||||
progress.On("HasDirPathFailedCreation", "newfolder/file.json").Return(true).Once()
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(r jobs.JobResourceResult) bool {
|
||||
return r.Path == "newfolder/file.json" && r.Action == repository.FileActionIgnored &&
|
||||
r.Warning != nil && r.Warning.Error() == "resource was not processed because the parent folder could not be created"
|
||||
})).Return().Once()
|
||||
|
||||
err := IncrementalSync(context.Background(), repo, "old-ref", "new-ref", repoResources, progress, tracing.NewNoopTracerService(), jobs.RegisterJobMetrics(prometheus.NewPedanticRegistry()))
|
||||
require.NoError(t, err)
|
||||
progress.AssertExpectations(t)
|
||||
}
|
||||
@@ -92,6 +92,10 @@ func TestIncrementalSync(t *testing.T) {
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
|
||||
// Mock HasDirPathFailedCreation checks
|
||||
progress.On("HasDirPathFailedCreation", "dashboards/test.json").Return(false)
|
||||
progress.On("HasDirPathFailedCreation", "alerts/alert.yaml").Return(false)
|
||||
|
||||
// Mock successful resource writes
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "dashboards/test.json", "new-ref").
|
||||
Return("test-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil)
|
||||
@@ -127,6 +131,9 @@ func TestIncrementalSync(t *testing.T) {
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
|
||||
// Mock HasDirPathFailedCreation check
|
||||
progress.On("HasDirPathFailedCreation", "unsupported/path/file.txt").Return(false)
|
||||
|
||||
// Mock folder creation
|
||||
repoResources.On("EnsureFolderPathExist", mock.Anything, "unsupported/path/").
|
||||
Return("test-folder", nil)
|
||||
@@ -161,6 +168,9 @@ func TestIncrementalSync(t *testing.T) {
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
|
||||
// Mock HasDirPathFailedCreation check
|
||||
progress.On("HasDirPathFailedCreation", ".unsupported/path/file.txt").Return(false)
|
||||
|
||||
progress.On("Record", mock.Anything, jobs.JobResourceResult{
|
||||
Action: repository.FileActionIgnored,
|
||||
Path: ".unsupported/path/file.txt",
|
||||
@@ -222,6 +232,9 @@ func TestIncrementalSync(t *testing.T) {
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
|
||||
// Mock HasDirPathFailedCreation check
|
||||
progress.On("HasDirPathFailedCreation", "dashboards/new.json").Return(false)
|
||||
|
||||
// Mock resource rename
|
||||
repoResources.On("RenameResourceFile", mock.Anything, "dashboards/old.json", "old-ref", "dashboards/new.json", "new-ref").
|
||||
Return("renamed-dashboard", "", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil)
|
||||
@@ -254,6 +267,10 @@ func TestIncrementalSync(t *testing.T) {
|
||||
progress.On("SetTotal", mock.Anything, 1).Return()
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
|
||||
// Mock HasDirPathFailedCreation check
|
||||
progress.On("HasDirPathFailedCreation", "dashboards/ignored.json").Return(false)
|
||||
|
||||
progress.On("Record", mock.Anything, jobs.JobResourceResult{
|
||||
Action: repository.FileActionIgnored,
|
||||
Path: "dashboards/ignored.json",
|
||||
@@ -277,16 +294,28 @@ func TestIncrementalSync(t *testing.T) {
|
||||
repo.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil)
|
||||
progress.On("SetTotal", mock.Anything, 1).Return()
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
|
||||
// Mock HasDirPathFailedCreation check
|
||||
progress.On("HasDirPathFailedCreation", "unsupported/path/file.txt").Return(false)
|
||||
|
||||
// Mock folder creation error
|
||||
repoResources.On("EnsureFolderPathExist", mock.Anything, "unsupported/path/").
|
||||
Return("", fmt.Errorf("failed to create folder"))
|
||||
|
||||
// Mock progress recording with error
|
||||
progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool {
|
||||
return result.Action == repository.FileActionIgnored &&
|
||||
result.Path == "unsupported/path/file.txt" &&
|
||||
result.Error != nil &&
|
||||
result.Error.Error() == "failed to create folder"
|
||||
})).Return()
|
||||
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
},
|
||||
previousRef: "old-ref",
|
||||
currentRef: "new-ref",
|
||||
expectedError: "unable to create empty file folder: failed to create folder",
|
||||
expectedCalls: 1,
|
||||
},
|
||||
{
|
||||
name: "error writing resource",
|
||||
@@ -303,6 +332,9 @@ func TestIncrementalSync(t *testing.T) {
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
|
||||
// Mock HasDirPathFailedCreation check
|
||||
progress.On("HasDirPathFailedCreation", "dashboards/test.json").Return(false)
|
||||
|
||||
// Mock resource write error
|
||||
repoResources.On("WriteResourceFromFile", mock.Anything, "dashboards/test.json", "new-ref").
|
||||
Return("test-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, fmt.Errorf("write failed"))
|
||||
@@ -372,7 +404,8 @@ func TestIncrementalSync(t *testing.T) {
|
||||
repo.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil)
|
||||
progress.On("SetTotal", mock.Anything, 1).Return()
|
||||
progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return()
|
||||
// Mock too many errors
|
||||
|
||||
// Mock too many errors - this is checked before processing files, so HasDirPathFailedCreation won't be called
|
||||
progress.On("TooManyErrors").Return(fmt.Errorf("too many errors occurred"))
|
||||
},
|
||||
previousRef: "old-ref",
|
||||
@@ -428,6 +461,9 @@ func TestIncrementalSync_CleanupOrphanedFolders(t *testing.T) {
|
||||
repoResources.On("RemoveResourceFromFile", mock.Anything, "dashboards/old.json", "old-ref").
|
||||
Return("old-dashboard", "folder-uid", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil)
|
||||
|
||||
// Mock HasDirPathFailedDeletion check for cleanup
|
||||
progress.On("HasDirPathFailedDeletion", "dashboards/").Return(false)
|
||||
|
||||
// if the folder is not found in git, there should be a call to remove the folder from grafana
|
||||
repo.MockReader.On("Read", mock.Anything, "dashboards/", "").
|
||||
Return((*repository.FileInfo)(nil), repository.ErrFileNotFound)
|
||||
@@ -453,6 +489,10 @@ func TestIncrementalSync_CleanupOrphanedFolders(t *testing.T) {
|
||||
progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return()
|
||||
repoResources.On("RemoveResourceFromFile", mock.Anything, "dashboards/old.json", "old-ref").
|
||||
Return("old-dashboard", "folder-uid", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil)
|
||||
|
||||
// Mock HasDirPathFailedDeletion check for cleanup
|
||||
progress.On("HasDirPathFailedDeletion", "dashboards/").Return(false)
|
||||
|
||||
// if the folder still exists in git, there should not be a call to delete it from grafana
|
||||
repo.MockReader.On("Read", mock.Anything, "dashboards/", "").
|
||||
Return(&repository.FileInfo{}, nil)
|
||||
@@ -485,6 +525,13 @@ func TestIncrementalSync_CleanupOrphanedFolders(t *testing.T) {
|
||||
repoResources.On("RemoveResourceFromFile", mock.Anything, "alerts/old-alert.yaml", "old-ref").
|
||||
Return("old-alert", "folder-uid-2", schema.GroupVersionKind{Kind: "Alert", Group: "alerts"}, nil)
|
||||
|
||||
progress.On("Record", mock.Anything, mock.Anything).Return()
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
|
||||
// Mock HasDirPathFailedDeletion checks for cleanup
|
||||
progress.On("HasDirPathFailedDeletion", "dashboards/").Return(false)
|
||||
progress.On("HasDirPathFailedDeletion", "alerts/").Return(false)
|
||||
|
||||
// both not found in git, both should be deleted
|
||||
repo.MockReader.On("Read", mock.Anything, "dashboards/", "").
|
||||
Return((*repository.FileInfo)(nil), repository.ErrFileNotFound)
|
||||
@@ -492,9 +539,6 @@ func TestIncrementalSync_CleanupOrphanedFolders(t *testing.T) {
|
||||
Return((*repository.FileInfo)(nil), repository.ErrFileNotFound)
|
||||
repoResources.On("RemoveFolder", mock.Anything, "folder-uid-1").Return(nil)
|
||||
repoResources.On("RemoveFolder", mock.Anything, "folder-uid-2").Return(nil)
|
||||
|
||||
progress.On("Record", mock.Anything, mock.Anything).Return()
|
||||
progress.On("TooManyErrors").Return(nil)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -559,6 +559,22 @@ func (b *APIBuilder) InstallSchema(scheme *runtime.Scheme) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Register custom field label conversion for Repository to enable field selectors like spec.connection.name
|
||||
err = scheme.AddFieldLabelConversionFunc(
|
||||
provisioning.SchemeGroupVersion.WithKind("Repository"),
|
||||
func(label, value string) (string, string, error) {
|
||||
switch label {
|
||||
case "metadata.name", "metadata.namespace", "spec.connection.name":
|
||||
return label, value, nil
|
||||
default:
|
||||
return "", "", fmt.Errorf("field label not supported for Repository: %s", label)
|
||||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
metav1.AddToGroupVersion(scheme, provisioning.SchemeGroupVersion)
|
||||
// Only 1 version (for now?)
|
||||
return scheme.SetVersionPriority(provisioning.SchemeGroupVersion)
|
||||
@@ -569,10 +585,19 @@ func (b *APIBuilder) AllowedV0Alpha1Resources() []string {
|
||||
}
|
||||
|
||||
func (b *APIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupInfo, opts builder.APIGroupOptions) error {
|
||||
repositoryStorage, err := grafanaregistry.NewRegistryStore(opts.Scheme, provisioning.RepositoryResourceInfo, opts.OptsGetter)
|
||||
// Create repository storage with custom field selectors (e.g., spec.connection.name)
|
||||
repositoryStorage, err := grafanaregistry.NewRegistryStoreWithSelectableFields(
|
||||
opts.Scheme,
|
||||
provisioning.RepositoryResourceInfo,
|
||||
opts.OptsGetter,
|
||||
grafanaregistry.SelectableFieldsOptions{
|
||||
GetAttrs: RepositoryGetAttrs,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create repository storage: %w", err)
|
||||
}
|
||||
|
||||
repositoryStatusStorage := grafanaregistry.NewRegistryStatusStore(opts.Scheme, repositoryStorage)
|
||||
b.store = repositoryStorage
|
||||
|
||||
|
||||
44
pkg/registry/apis/provisioning/repository_fields.go
Normal file
44
pkg/registry/apis/provisioning/repository_fields.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package provisioning
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/generic"
|
||||
|
||||
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
)
|
||||
|
||||
// RepositoryToSelectableFields returns a field set that can be used for field selectors.
|
||||
// This includes standard metadata fields plus custom fields like spec.connection.name.
|
||||
func RepositoryToSelectableFields(obj *provisioning.Repository) fields.Set {
|
||||
objectMetaFields := generic.ObjectMetaFieldsSet(&obj.ObjectMeta, true)
|
||||
|
||||
// Add custom selectable fields
|
||||
specificFields := fields.Set{
|
||||
"spec.connection.name": getConnectionName(obj),
|
||||
}
|
||||
|
||||
return generic.MergeFieldsSets(objectMetaFields, specificFields)
|
||||
}
|
||||
|
||||
// getConnectionName safely extracts the connection name from a Repository.
|
||||
// Returns empty string if no connection is configured.
|
||||
func getConnectionName(obj *provisioning.Repository) string {
|
||||
if obj == nil || obj.Spec.Connection == nil {
|
||||
return ""
|
||||
}
|
||||
return obj.Spec.Connection.Name
|
||||
}
|
||||
|
||||
// RepositoryGetAttrs returns labels and fields of a Repository object.
|
||||
// This is used by the storage layer for filtering.
|
||||
func RepositoryGetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
|
||||
repo, ok := obj.(*provisioning.Repository)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("given object is not a Repository")
|
||||
}
|
||||
return labels.Set(repo.Labels), RepositoryToSelectableFields(repo), nil
|
||||
}
|
||||
184
pkg/registry/apis/provisioning/repository_fields_test.go
Normal file
184
pkg/registry/apis/provisioning/repository_fields_test.go
Normal file
@@ -0,0 +1,184 @@
|
||||
package provisioning
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
)
|
||||
|
||||
func TestGetConnectionName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
repo *provisioning.Repository
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "nil repository returns empty string",
|
||||
repo: nil,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "repository without connection returns empty string",
|
||||
repo: &provisioning.Repository{
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Title: "test-repo",
|
||||
},
|
||||
},
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "repository with connection returns connection name",
|
||||
repo: &provisioning.Repository{
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Title: "test-repo",
|
||||
Connection: &provisioning.ConnectionInfo{
|
||||
Name: "my-connection",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: "my-connection",
|
||||
},
|
||||
{
|
||||
name: "repository with empty connection name returns empty string",
|
||||
repo: &provisioning.Repository{
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Title: "test-repo",
|
||||
Connection: &provisioning.ConnectionInfo{
|
||||
Name: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := getConnectionName(tt.repo)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepositoryToSelectableFields(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
repo *provisioning.Repository
|
||||
expectedFields map[string]string
|
||||
}{
|
||||
{
|
||||
name: "includes metadata.name and metadata.namespace",
|
||||
repo: &provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Title: "Test Repository",
|
||||
},
|
||||
},
|
||||
expectedFields: map[string]string{
|
||||
"metadata.name": "test-repo",
|
||||
"metadata.namespace": "default",
|
||||
"spec.connection.name": "",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "includes spec.connection.name when set",
|
||||
repo: &provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "repo-with-connection",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Title: "Repo With Connection",
|
||||
Connection: &provisioning.ConnectionInfo{
|
||||
Name: "github-connection",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedFields: map[string]string{
|
||||
"metadata.name": "repo-with-connection",
|
||||
"metadata.namespace": "org-1",
|
||||
"spec.connection.name": "github-connection",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fields := RepositoryToSelectableFields(tt.repo)
|
||||
|
||||
for key, expectedValue := range tt.expectedFields {
|
||||
actualValue, exists := fields[key]
|
||||
assert.True(t, exists, "field %s should exist", key)
|
||||
assert.Equal(t, expectedValue, actualValue, "field %s should have correct value", key)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepositoryGetAttrs(t *testing.T) {
|
||||
t.Run("returns error for non-Repository object", func(t *testing.T) {
|
||||
// Pass a different runtime.Object type instead of a Repository
|
||||
connection := &provisioning.Connection{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "not-a-repository",
|
||||
},
|
||||
}
|
||||
_, _, err := RepositoryGetAttrs(connection)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not a Repository")
|
||||
})
|
||||
|
||||
t.Run("returns labels and fields for valid Repository", func(t *testing.T) {
|
||||
repo := &provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"app": "grafana",
|
||||
"env": "test",
|
||||
},
|
||||
},
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Title: "Test Repository",
|
||||
Connection: &provisioning.ConnectionInfo{
|
||||
Name: "my-connection",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
labels, fields, err := RepositoryGetAttrs(repo)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check labels
|
||||
assert.Equal(t, "grafana", labels["app"])
|
||||
assert.Equal(t, "test", labels["env"])
|
||||
|
||||
// Check fields
|
||||
assert.Equal(t, "test-repo", fields["metadata.name"])
|
||||
assert.Equal(t, "default", fields["metadata.namespace"])
|
||||
assert.Equal(t, "my-connection", fields["spec.connection.name"])
|
||||
})
|
||||
|
||||
t.Run("returns empty connection name when not set", func(t *testing.T) {
|
||||
repo := &provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Title: "Test Repository",
|
||||
},
|
||||
}
|
||||
|
||||
_, fields, err := RepositoryGetAttrs(repo)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "", fields["spec.connection.name"])
|
||||
})
|
||||
}
|
||||
@@ -20,6 +20,21 @@ import (
|
||||
|
||||
const MaxNumberOfFolders = 10000
|
||||
|
||||
// PathCreationError represents an error that occurred while creating a folder path.
|
||||
// It contains the path that failed and the underlying error.
|
||||
type PathCreationError struct {
|
||||
Path string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *PathCreationError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
|
||||
func (e *PathCreationError) Error() string {
|
||||
return fmt.Sprintf("failed to create path %s: %v", e.Path, e.Err)
|
||||
}
|
||||
|
||||
type FolderManager struct {
|
||||
repo repository.ReaderWriter
|
||||
tree FolderTree
|
||||
@@ -73,7 +88,11 @@ func (fm *FolderManager) EnsureFolderPathExist(ctx context.Context, filePath str
|
||||
}
|
||||
|
||||
if err := fm.EnsureFolderExists(ctx, f, parent); err != nil {
|
||||
return fmt.Errorf("ensure folder exists: %w", err)
|
||||
// Wrap in PathCreationError to indicate which path failed
|
||||
return &PathCreationError{
|
||||
Path: f.Path,
|
||||
Err: fmt.Errorf("ensure folder exists: %w", err),
|
||||
}
|
||||
}
|
||||
|
||||
fm.tree.Add(f, parent)
|
||||
|
||||
68
pkg/registry/apis/provisioning/resources/folders_test.go
Normal file
68
pkg/registry/apis/provisioning/resources/folders_test.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package resources_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/registry/apis/provisioning/resources"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestPathCreationError(t *testing.T) {
|
||||
t.Run("Error method returns formatted message", func(t *testing.T) {
|
||||
underlyingErr := fmt.Errorf("underlying error")
|
||||
pathErr := &resources.PathCreationError{
|
||||
Path: "grafana/folder-1",
|
||||
Err: underlyingErr,
|
||||
}
|
||||
|
||||
expectedMsg := "failed to create path grafana/folder-1: underlying error"
|
||||
require.Equal(t, expectedMsg, pathErr.Error())
|
||||
})
|
||||
|
||||
t.Run("Unwrap returns underlying error", func(t *testing.T) {
|
||||
underlyingErr := fmt.Errorf("underlying error")
|
||||
pathErr := &resources.PathCreationError{
|
||||
Path: "grafana/folder-1",
|
||||
Err: underlyingErr,
|
||||
}
|
||||
|
||||
unwrapped := pathErr.Unwrap()
|
||||
require.Equal(t, underlyingErr, unwrapped)
|
||||
require.EqualError(t, unwrapped, "underlying error")
|
||||
})
|
||||
|
||||
t.Run("errors.Is finds underlying error", func(t *testing.T) {
|
||||
underlyingErr := fmt.Errorf("underlying error")
|
||||
pathErr := &resources.PathCreationError{
|
||||
Path: "grafana/folder-1",
|
||||
Err: underlyingErr,
|
||||
}
|
||||
|
||||
require.True(t, errors.Is(pathErr, underlyingErr))
|
||||
require.False(t, errors.Is(pathErr, fmt.Errorf("different error")))
|
||||
})
|
||||
|
||||
t.Run("errors.As extracts PathCreationError", func(t *testing.T) {
|
||||
underlyingErr := fmt.Errorf("underlying error")
|
||||
pathErr := &resources.PathCreationError{
|
||||
Path: "grafana/folder-1",
|
||||
Err: underlyingErr,
|
||||
}
|
||||
|
||||
var extractedErr *resources.PathCreationError
|
||||
require.True(t, errors.As(pathErr, &extractedErr))
|
||||
require.NotNil(t, extractedErr)
|
||||
require.Equal(t, "grafana/folder-1", extractedErr.Path)
|
||||
require.Equal(t, underlyingErr, extractedErr.Err)
|
||||
})
|
||||
|
||||
t.Run("errors.As returns false for non-PathCreationError", func(t *testing.T) {
|
||||
regularErr := fmt.Errorf("regular error")
|
||||
|
||||
var extractedErr *resources.PathCreationError
|
||||
require.False(t, errors.As(regularErr, &extractedErr))
|
||||
require.Nil(t, extractedErr)
|
||||
})
|
||||
}
|
||||
@@ -205,6 +205,14 @@ func (s *ModuleServer) Run() error {
|
||||
return sql.ProvideUnifiedStorageGrpcService(s.cfg, s.features, nil, s.log, s.registerer, docBuilders, s.storageMetrics, s.indexMetrics, s.searchServerRing, s.MemberlistKVConfig, s.httpServerRouter, s.storageBackend)
|
||||
})
|
||||
|
||||
m.RegisterModule(modules.SearchServer, func() (services.Service, error) {
|
||||
docBuilders, err := InitializeDocumentBuilders(s.cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sql.ProvideSearchGrpcService(s.cfg, s.features, nil, s.log, s.registerer, docBuilders, s.indexMetrics, s.searchServerRing, s.MemberlistKVConfig, s.storageBackend)
|
||||
})
|
||||
|
||||
m.RegisterModule(modules.ZanzanaServer, func() (services.Service, error) {
|
||||
return authz.ProvideZanzanaService(s.cfg, s.features, s.registerer)
|
||||
})
|
||||
|
||||
@@ -65,6 +65,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/storage/legacysql"
|
||||
"github.com/grafana/grafana/pkg/storage/unified"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
|
||||
search2 "github.com/grafana/grafana/pkg/storage/unified/search"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/search/builders"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/sql"
|
||||
@@ -146,6 +147,7 @@ var wireExtsBasicSet = wire.NewSet(
|
||||
wire.Bind(new(sandbox.Sandbox), new(*sandbox.Service)),
|
||||
wire.Struct(new(unified.Options), "*"),
|
||||
unified.ProvideUnifiedStorageClient,
|
||||
wire.Bind(new(resourcepb.ResourceIndexClient), new(resource.ResourceClient)),
|
||||
sql.ProvideStorageBackend,
|
||||
builder.ProvideDefaultBuildHandlerChainFuncFromBuilders,
|
||||
aggregatorrunner.ProvideNoopAggregatorConfigurator,
|
||||
|
||||
@@ -1087,13 +1087,6 @@ var (
|
||||
Stage: FeatureStageExperimental,
|
||||
Owner: identityAccessTeam,
|
||||
},
|
||||
{
|
||||
Name: "unifiedStorageSearch",
|
||||
Description: "Enable unified storage search",
|
||||
Stage: FeatureStageExperimental,
|
||||
Owner: grafanaSearchAndStorageSquad,
|
||||
HideFromDocs: true,
|
||||
},
|
||||
{
|
||||
Name: "unifiedStorageSearchSprinkles",
|
||||
Description: "Enable sprinkles on unified storage search",
|
||||
@@ -1585,8 +1578,8 @@ var (
|
||||
},
|
||||
{
|
||||
Name: "kubernetesAuthzApis",
|
||||
Description: "Registers AuthZ /apis endpoint",
|
||||
Stage: FeatureStageExperimental,
|
||||
Description: "Deprecated: Use kubernetesAuthzCoreRolesApi, kubernetesAuthzRolesApi, and kubernetesAuthzRoleBindingsApi instead",
|
||||
Stage: FeatureStageDeprecated,
|
||||
Owner: identityAccessTeam,
|
||||
HideFromDocs: true,
|
||||
},
|
||||
@@ -1611,6 +1604,27 @@ var (
|
||||
Owner: identityAccessTeam,
|
||||
HideFromDocs: true,
|
||||
},
|
||||
{
|
||||
Name: "kubernetesAuthzCoreRolesApi",
|
||||
Description: "Registers AuthZ Core Roles /apis endpoint",
|
||||
Stage: FeatureStageExperimental,
|
||||
Owner: identityAccessTeam,
|
||||
HideFromDocs: true,
|
||||
},
|
||||
{
|
||||
Name: "kubernetesAuthzRolesApi",
|
||||
Description: "Registers AuthZ Roles /apis endpoint",
|
||||
Stage: FeatureStageExperimental,
|
||||
Owner: identityAccessTeam,
|
||||
HideFromDocs: true,
|
||||
},
|
||||
{
|
||||
Name: "kubernetesAuthzRoleBindingsApi",
|
||||
Description: "Registers AuthZ Role Bindings /apis endpoint",
|
||||
Stage: FeatureStageExperimental,
|
||||
Owner: identityAccessTeam,
|
||||
HideFromDocs: true,
|
||||
},
|
||||
{
|
||||
Name: "kubernetesAuthnMutation",
|
||||
Description: "Enables create, delete, and update mutations for resources owned by IAM identity",
|
||||
@@ -1859,14 +1873,6 @@ var (
|
||||
Expression: "false",
|
||||
RequiresRestart: true,
|
||||
},
|
||||
{
|
||||
Name: "tempoSearchBackendMigration",
|
||||
Description: "Run search queries through the tempo backend",
|
||||
Stage: FeatureStageGeneralAvailability,
|
||||
Owner: grafanaOSSBigTent,
|
||||
Expression: "false",
|
||||
RequiresRestart: true,
|
||||
},
|
||||
{
|
||||
Name: "cdnPluginsLoadFirst",
|
||||
Description: "Prioritize loading plugins from the CDN before other sources",
|
||||
|
||||
7
pkg/services/featuremgmt/toggles_gen.csv
generated
7
pkg/services/featuremgmt/toggles_gen.csv
generated
@@ -150,7 +150,6 @@ alertingQueryAndExpressionsStepMode,GA,@grafana/alerting-squad,false,false,true
|
||||
improvedExternalSessionHandling,GA,@grafana/identity-access-team,false,false,false
|
||||
useSessionStorageForRedirection,GA,@grafana/identity-access-team,false,false,false
|
||||
rolePickerDrawer,experimental,@grafana/identity-access-team,false,false,false
|
||||
unifiedStorageSearch,experimental,@grafana/search-and-storage,false,false,false
|
||||
unifiedStorageSearchSprinkles,experimental,@grafana/search-and-storage,false,false,false
|
||||
managedDualWriter,experimental,@grafana/search-and-storage,false,false,false
|
||||
pluginsSriChecks,GA,@grafana/plugins-platform-backend,false,false,false
|
||||
@@ -217,10 +216,13 @@ pluginsAutoUpdate,experimental,@grafana/plugins-platform-backend,false,false,fal
|
||||
alertingListViewV2PreviewToggle,privatePreview,@grafana/alerting-squad,false,false,true
|
||||
alertRuleUseFiredAtForStartsAt,experimental,@grafana/alerting-squad,false,false,false
|
||||
alertingBulkActionsInUI,GA,@grafana/alerting-squad,false,false,true
|
||||
kubernetesAuthzApis,experimental,@grafana/identity-access-team,false,false,false
|
||||
kubernetesAuthzApis,deprecated,@grafana/identity-access-team,false,false,false
|
||||
kubernetesAuthZHandlerRedirect,experimental,@grafana/identity-access-team,false,false,false
|
||||
kubernetesAuthzResourcePermissionApis,experimental,@grafana/identity-access-team,false,false,false
|
||||
kubernetesAuthzZanzanaSync,experimental,@grafana/identity-access-team,false,false,false
|
||||
kubernetesAuthzCoreRolesApi,experimental,@grafana/identity-access-team,false,false,false
|
||||
kubernetesAuthzRolesApi,experimental,@grafana/identity-access-team,false,false,false
|
||||
kubernetesAuthzRoleBindingsApi,experimental,@grafana/identity-access-team,false,false,false
|
||||
kubernetesAuthnMutation,experimental,@grafana/identity-access-team,false,false,false
|
||||
kubernetesExternalGroupMapping,experimental,@grafana/identity-access-team,false,false,false
|
||||
restoreDashboards,experimental,@grafana/grafana-search-navigate-organise,false,false,false
|
||||
@@ -253,7 +255,6 @@ graphiteBackendMode,privatePreview,@grafana/partner-datasources,false,false,fals
|
||||
azureResourcePickerUpdates,GA,@grafana/partner-datasources,false,false,true
|
||||
prometheusTypeMigration,experimental,@grafana/partner-datasources,false,true,false
|
||||
pluginContainers,privatePreview,@grafana/plugins-platform-backend,false,true,false
|
||||
tempoSearchBackendMigration,GA,@grafana/oss-big-tent,false,true,false
|
||||
cdnPluginsLoadFirst,experimental,@grafana/plugins-platform-backend,false,false,false
|
||||
cdnPluginsUrls,experimental,@grafana/plugins-platform-backend,false,false,false
|
||||
pluginInstallAPISync,experimental,@grafana/plugins-platform-backend,false,false,false
|
||||
|
||||
|
22
pkg/services/featuremgmt/toggles_gen.go
generated
22
pkg/services/featuremgmt/toggles_gen.go
generated
@@ -455,10 +455,6 @@ const (
|
||||
// Enables the new role picker drawer design
|
||||
FlagRolePickerDrawer = "rolePickerDrawer"
|
||||
|
||||
// FlagUnifiedStorageSearch
|
||||
// Enable unified storage search
|
||||
FlagUnifiedStorageSearch = "unifiedStorageSearch"
|
||||
|
||||
// FlagUnifiedStorageSearchSprinkles
|
||||
// Enable sprinkles on unified storage search
|
||||
FlagUnifiedStorageSearchSprinkles = "unifiedStorageSearchSprinkles"
|
||||
@@ -631,7 +627,7 @@ const (
|
||||
FlagAlertRuleUseFiredAtForStartsAt = "alertRuleUseFiredAtForStartsAt"
|
||||
|
||||
// FlagKubernetesAuthzApis
|
||||
// Registers AuthZ /apis endpoint
|
||||
// Deprecated: Use kubernetesAuthzCoreRolesApi, kubernetesAuthzRolesApi, and kubernetesAuthzRoleBindingsApi instead
|
||||
FlagKubernetesAuthzApis = "kubernetesAuthzApis"
|
||||
|
||||
// FlagKubernetesAuthZHandlerRedirect
|
||||
@@ -646,6 +642,18 @@ const (
|
||||
// Enable sync of Zanzana authorization store on AuthZ CRD mutations
|
||||
FlagKubernetesAuthzZanzanaSync = "kubernetesAuthzZanzanaSync"
|
||||
|
||||
// FlagKubernetesAuthzCoreRolesApi
|
||||
// Registers AuthZ Core Roles /apis endpoint
|
||||
FlagKubernetesAuthzCoreRolesApi = "kubernetesAuthzCoreRolesApi"
|
||||
|
||||
// FlagKubernetesAuthzRolesApi
|
||||
// Registers AuthZ Roles /apis endpoint
|
||||
FlagKubernetesAuthzRolesApi = "kubernetesAuthzRolesApi"
|
||||
|
||||
// FlagKubernetesAuthzRoleBindingsApi
|
||||
// Registers AuthZ Role Bindings /apis endpoint
|
||||
FlagKubernetesAuthzRoleBindingsApi = "kubernetesAuthzRoleBindingsApi"
|
||||
|
||||
// FlagKubernetesAuthnMutation
|
||||
// Enables create, delete, and update mutations for resources owned by IAM identity
|
||||
FlagKubernetesAuthnMutation = "kubernetesAuthnMutation"
|
||||
@@ -730,10 +738,6 @@ const (
|
||||
// Enables running plugins in containers
|
||||
FlagPluginContainers = "pluginContainers"
|
||||
|
||||
// FlagTempoSearchBackendMigration
|
||||
// Run search queries through the tempo backend
|
||||
FlagTempoSearchBackendMigration = "tempoSearchBackendMigration"
|
||||
|
||||
// FlagCdnPluginsLoadFirst
|
||||
// Prioritize loading plugins from the CDN before other sources
|
||||
FlagCdnPluginsLoadFirst = "cdnPluginsLoadFirst"
|
||||
|
||||
51
pkg/services/featuremgmt/toggles_gen.json
generated
51
pkg/services/featuremgmt/toggles_gen.json
generated
@@ -1951,11 +1951,27 @@
|
||||
{
|
||||
"metadata": {
|
||||
"name": "kubernetesAuthzApis",
|
||||
"resourceVersion": "1764664939750",
|
||||
"creationTimestamp": "2025-06-18T07:43:01Z"
|
||||
"resourceVersion": "1767954559317",
|
||||
"creationTimestamp": "2025-06-18T07:43:01Z",
|
||||
"annotations": {
|
||||
"grafana.app/updatedTimestamp": "2026-01-09 10:29:19.317164 +0000 UTC"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"description": "Registers AuthZ /apis endpoint",
|
||||
"description": "Deprecated: Use kubernetesAuthzCoreRolesApi, kubernetesAuthzRolesApi, and kubernetesAuthzRoleBindingsApi instead",
|
||||
"stage": "deprecated",
|
||||
"codeowner": "@grafana/identity-access-team",
|
||||
"hideFromDocs": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "kubernetesAuthzCoreRolesApi",
|
||||
"resourceVersion": "1767954459090",
|
||||
"creationTimestamp": "2026-01-09T10:27:39Z"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Registers AuthZ Core Roles /apis endpoint",
|
||||
"stage": "experimental",
|
||||
"codeowner": "@grafana/identity-access-team",
|
||||
"hideFromDocs": true
|
||||
@@ -1975,6 +1991,32 @@
|
||||
"hideFromDocs": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "kubernetesAuthzRoleBindingsApi",
|
||||
"resourceVersion": "1767954459090",
|
||||
"creationTimestamp": "2026-01-09T10:27:39Z"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Registers AuthZ Role Bindings /apis endpoint",
|
||||
"stage": "experimental",
|
||||
"codeowner": "@grafana/identity-access-team",
|
||||
"hideFromDocs": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "kubernetesAuthzRolesApi",
|
||||
"resourceVersion": "1767954459090",
|
||||
"creationTimestamp": "2026-01-09T10:27:39Z"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Registers AuthZ Roles /apis endpoint",
|
||||
"stage": "experimental",
|
||||
"codeowner": "@grafana/identity-access-team",
|
||||
"hideFromDocs": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "kubernetesAuthzZanzanaSync",
|
||||
@@ -3655,7 +3697,8 @@
|
||||
"metadata": {
|
||||
"name": "unifiedStorageSearch",
|
||||
"resourceVersion": "1764664939750",
|
||||
"creationTimestamp": "2024-09-30T19:46:14Z"
|
||||
"creationTimestamp": "2024-09-30T19:46:14Z",
|
||||
"deletionTimestamp": "2026-01-12T10:02:12Z"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Enable unified storage search",
|
||||
|
||||
@@ -100,6 +100,9 @@ func (d *DsLookup) ByRef(ref *DataSourceRef) *DataSourceRef {
|
||||
if ref == nil {
|
||||
return d.defaultDS
|
||||
}
|
||||
if ref.UID == "default" && ref.Type == "" {
|
||||
return d.defaultDS
|
||||
}
|
||||
|
||||
key := ""
|
||||
if ref.UID != "" {
|
||||
@@ -117,7 +120,13 @@ func (d *DsLookup) ByRef(ref *DataSourceRef) *DataSourceRef {
|
||||
return ds
|
||||
}
|
||||
|
||||
return d.byName[key]
|
||||
ds, ok = d.byName[key]
|
||||
if ok {
|
||||
return ds
|
||||
}
|
||||
|
||||
// With nothing was found (or configured), use the original reference
|
||||
return ref
|
||||
}
|
||||
|
||||
func (d *DsLookup) ByType(dsType string) []DataSourceRef {
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
"tags": null,
|
||||
"datasource": [
|
||||
{
|
||||
"uid": "default.uid",
|
||||
"type": "default.type"
|
||||
"uid": "000000001",
|
||||
"type": "graphite"
|
||||
}
|
||||
],
|
||||
"panels": [
|
||||
@@ -16,8 +16,8 @@
|
||||
"libraryPanel": "dfkljg98345dkf",
|
||||
"datasource": [
|
||||
{
|
||||
"uid": "default.uid",
|
||||
"type": "default.type"
|
||||
"uid": "000000001",
|
||||
"type": "graphite"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
package dashboard
|
||||
|
||||
import "iter"
|
||||
|
||||
type PanelSummaryInfo struct {
|
||||
ID int64 `json:"id"`
|
||||
Title string `json:"title"`
|
||||
@@ -30,3 +32,20 @@ type DashboardSummaryInfo struct {
|
||||
Refresh string `json:"refresh,omitempty"`
|
||||
ReadOnly bool `json:"readOnly,omitempty"` // editable = false
|
||||
}
|
||||
|
||||
func (d *DashboardSummaryInfo) PanelIterator() iter.Seq[PanelSummaryInfo] {
|
||||
return func(yield func(PanelSummaryInfo) bool) {
|
||||
for _, p := range d.Panels {
|
||||
if len(p.Collapsed) > 0 {
|
||||
for _, c := range p.Collapsed {
|
||||
if !yield(c) { // NOTE, rows can only be one level deep!
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
if !yield(p) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -623,6 +623,8 @@ type Cfg struct {
|
||||
OverridesFilePath string
|
||||
OverridesReloadInterval time.Duration
|
||||
EnableSQLKVBackend bool
|
||||
SearchMode string // "", "embedded", "remote" - empty defaults to embedded
|
||||
SearchServerAddress string // gRPC address for remote search server
|
||||
|
||||
// Secrets Management
|
||||
SecretsManagement SecretsManagerSettings
|
||||
|
||||
@@ -136,6 +136,10 @@ func (cfg *Cfg) setUnifiedStorageConfig() {
|
||||
// use sqlkv (resource/sqlkv) instead of the sql backend (sql/backend) as the StorageServer
|
||||
cfg.EnableSQLKVBackend = section.Key("enable_sqlkv_backend").MustBool(false)
|
||||
|
||||
// search mode: "", "embedded", "remote" - empty defaults to embedded for backward compatibility
|
||||
cfg.SearchMode = section.Key("search_mode").MustString("")
|
||||
cfg.SearchServerAddress = section.Key("search_server_address").String()
|
||||
|
||||
cfg.MaxFileIndexAge = section.Key("max_file_index_age").MustDuration(0)
|
||||
cfg.MinFileIndexBuildVersion = section.Key("min_file_index_build_version").MustString("")
|
||||
}
|
||||
|
||||
@@ -236,7 +236,6 @@ kubernetesDashboards = true
|
||||
kubernetesFolders = true
|
||||
unifiedStorage = true
|
||||
unifiedStorageHistoryPruner = true
|
||||
unifiedStorageSearch = true
|
||||
unifiedStorageSearchPermissionFiltering = false
|
||||
unifiedStorageSearchSprinkles = false
|
||||
|
||||
|
||||
@@ -79,7 +79,7 @@ func NewRESTOptionsGetterMemory(originalStorageConfig storagebackend.Config, sec
|
||||
}
|
||||
|
||||
return NewRESTOptionsGetterForClient(
|
||||
resource.NewLocalResourceClient(server),
|
||||
resource.NewLocalResourceClient(server, nil),
|
||||
secrets,
|
||||
originalStorageConfig,
|
||||
nil,
|
||||
@@ -118,7 +118,7 @@ func NewRESTOptionsGetterForFileXX(path string,
|
||||
}
|
||||
|
||||
return NewRESTOptionsGetterForClient(
|
||||
resource.NewLocalResourceClient(server),
|
||||
resource.NewLocalResourceClient(server, nil),
|
||||
nil, // secrets
|
||||
originalStorageConfig,
|
||||
nil,
|
||||
|
||||
@@ -156,7 +156,7 @@ func testSetup(t testing.TB, opts ...setupOption) (context.Context, storage.Inte
|
||||
default:
|
||||
t.Fatalf("unsupported storage type: %s", setupOpts.storageType)
|
||||
}
|
||||
client := resource.NewLocalResourceClient(server)
|
||||
client := resource.NewLocalResourceClient(server, nil)
|
||||
|
||||
config := storagebackend.NewDefaultConfig(setupOpts.prefix, setupOpts.codec)
|
||||
store, destroyFunc, err := apistore.NewStorage(
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/grafana/dskit/grpcclient"
|
||||
"github.com/grafana/dskit/middleware"
|
||||
"github.com/grafana/dskit/services"
|
||||
|
||||
grafanarest "github.com/grafana/grafana/pkg/apiserver/rest"
|
||||
infraDB "github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
@@ -135,7 +136,7 @@ func newClient(opts options.StorageOptions,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resource.NewLocalResourceClient(server), nil
|
||||
return resource.NewLocalResourceClient(server, nil), nil
|
||||
|
||||
case options.StorageTypeUnifiedGrpc:
|
||||
if opts.Address == "" {
|
||||
@@ -224,11 +225,11 @@ func newClient(opts options.StorageOptions,
|
||||
serverOptions.OverridesService = overridesSvc
|
||||
}
|
||||
|
||||
server, err := sql.NewResourceServer(serverOptions)
|
||||
server, searchServer, err := sql.NewResourceServer(serverOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resource.NewLocalResourceClient(server), nil
|
||||
return resource.NewLocalResourceClient(server, searchServer), nil
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -31,15 +31,20 @@ import (
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
|
||||
)
|
||||
|
||||
type SearchClient interface {
|
||||
resourcepb.ResourceIndexClient
|
||||
resourcepb.ManagedObjectIndexClient
|
||||
}
|
||||
|
||||
//go:generate mockery --name ResourceClient --structname MockResourceClient --inpackage --filename client_mock.go --with-expecter
|
||||
type ResourceClient interface {
|
||||
resourcepb.ResourceStoreClient
|
||||
resourcepb.ResourceIndexClient
|
||||
resourcepb.ManagedObjectIndexClient
|
||||
resourcepb.BulkStoreClient
|
||||
resourcepb.BlobStoreClient
|
||||
resourcepb.DiagnosticsClient
|
||||
resourcepb.QuotasClient
|
||||
// SearchClient methods are included for convenience - the client typically needs both
|
||||
SearchClient
|
||||
}
|
||||
|
||||
// Internal implementation
|
||||
@@ -92,16 +97,15 @@ func NewLegacyResourceClient(channel grpc.ClientConnInterface, indexChannel grpc
|
||||
return newResourceClient(cc, cci)
|
||||
}
|
||||
|
||||
func NewLocalResourceClient(server ResourceServer) ResourceClient {
|
||||
func NewLocalResourceClient(server ResourceServer, searchServer SearchServer) ResourceClient {
|
||||
// scenario: local in-proc
|
||||
channel := &inprocgrpc.Channel{}
|
||||
indexChannel := &inprocgrpc.Channel{}
|
||||
tracer := otel.Tracer("github.com/grafana/grafana/pkg/storage/unified/resource")
|
||||
|
||||
grpcAuthInt := grpcutils.NewUnsafeAuthenticator(tracer)
|
||||
for _, desc := range []*grpc.ServiceDesc{
|
||||
&resourcepb.ResourceStore_ServiceDesc,
|
||||
&resourcepb.ResourceIndex_ServiceDesc,
|
||||
&resourcepb.ManagedObjectIndex_ServiceDesc,
|
||||
&resourcepb.BlobStore_ServiceDesc,
|
||||
&resourcepb.BulkStore_ServiceDesc,
|
||||
&resourcepb.Diagnostics_ServiceDesc,
|
||||
@@ -117,13 +121,31 @@ func NewLocalResourceClient(server ResourceServer) ResourceClient {
|
||||
)
|
||||
}
|
||||
|
||||
// Register search services on the index channel if searchServer is provided
|
||||
if searchServer != nil {
|
||||
for _, desc := range []*grpc.ServiceDesc{
|
||||
&resourcepb.ResourceIndex_ServiceDesc,
|
||||
&resourcepb.ManagedObjectIndex_ServiceDesc,
|
||||
} {
|
||||
indexChannel.RegisterService(
|
||||
grpchan.InterceptServer(
|
||||
desc,
|
||||
grpcAuth.UnaryServerInterceptor(grpcAuthInt),
|
||||
grpcAuth.StreamServerInterceptor(grpcAuthInt),
|
||||
),
|
||||
searchServer,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
clientInt := authnlib.NewGrpcClientInterceptor(
|
||||
ProvideInProcExchanger(),
|
||||
authnlib.WithClientInterceptorIDTokenExtractor(idTokenExtractor),
|
||||
)
|
||||
|
||||
cc := grpchan.InterceptClientConn(channel, clientInt.UnaryClientInterceptor, clientInt.StreamClientInterceptor)
|
||||
return newResourceClient(cc, cc)
|
||||
cci := grpchan.InterceptClientConn(indexChannel, clientInt.UnaryClientInterceptor, clientInt.StreamClientInterceptor)
|
||||
return newResourceClient(cc, cci)
|
||||
}
|
||||
|
||||
type RemoteResourceClientConfig struct {
|
||||
|
||||
@@ -127,7 +127,10 @@ type SearchBackend interface {
|
||||
GetOpenIndexes() []NamespacedResource
|
||||
}
|
||||
|
||||
// This supports indexing+search regardless of implementation
|
||||
var _ SearchServer = &searchSupport{}
|
||||
|
||||
// This supports indexing+search regardless of implementation.
|
||||
// Implements SearchServer interface.
|
||||
type searchSupport struct {
|
||||
log log.Logger
|
||||
storage StorageBackend
|
||||
@@ -160,6 +163,10 @@ var (
|
||||
_ resourcepb.ManagedObjectIndexServer = (*searchSupport)(nil)
|
||||
)
|
||||
|
||||
func NewSearchServer(opts SearchOptions, storage StorageBackend, access types.AccessClient, blob BlobSupport, indexMetrics *BleveIndexMetrics, ownsIndexFn func(key NamespacedResource) (bool, error)) (SearchServer, error) {
|
||||
return newSearchSupport(opts, storage, access, blob, indexMetrics, ownsIndexFn)
|
||||
}
|
||||
|
||||
func newSearchSupport(opts SearchOptions, storage StorageBackend, access types.AccessClient, blob BlobSupport, indexMetrics *BleveIndexMetrics, ownsIndexFn func(key NamespacedResource) (bool, error)) (support *searchSupport, err error) {
|
||||
// No backend search support
|
||||
if opts.Backend == nil {
|
||||
@@ -598,6 +605,15 @@ func (s *searchSupport) buildIndexes(ctx context.Context) (int, error) {
|
||||
return totalBatchesIndexed, nil
|
||||
}
|
||||
|
||||
func (s *searchSupport) Init(ctx context.Context) error {
|
||||
return s.init(ctx)
|
||||
}
|
||||
|
||||
func (s *searchSupport) Stop(_ context.Context) error {
|
||||
s.stop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *searchSupport) init(ctx context.Context) error {
|
||||
origCtx := ctx
|
||||
|
||||
@@ -863,7 +879,7 @@ func newRebuildRequest(key NamespacedResource, minBuildTime, lastImportTime time
|
||||
|
||||
func (s *searchSupport) getOrCreateIndex(ctx context.Context, stats *SearchStats, key NamespacedResource, reason string) (ResourceIndex, error) {
|
||||
if s == nil || s.search == nil {
|
||||
return nil, fmt.Errorf("search is not configured properly (missing unifiedStorageSearch feature toggle?)")
|
||||
return nil, fmt.Errorf("search is not configured properly (missing enable_search config?)")
|
||||
}
|
||||
|
||||
ctx, span := tracer.Start(ctx, "resource.searchSupport.getOrCreateIndex")
|
||||
|
||||
@@ -60,7 +60,7 @@ func ProvideSearchDistributorServer(cfg *setting.Cfg, features featuremgmt.Featu
|
||||
}
|
||||
|
||||
type RingClient struct {
|
||||
Client ResourceClient
|
||||
Client SearchClient
|
||||
grpc_health_v1.HealthClient
|
||||
Conn *grpc.ClientConn
|
||||
}
|
||||
@@ -99,7 +99,7 @@ var (
|
||||
func (ds *distributorServer) Search(ctx context.Context, r *resourcepb.ResourceSearchRequest) (*resourcepb.ResourceSearchResponse, error) {
|
||||
ctx, span := ds.tracing.Start(ctx, "distributor.Search")
|
||||
defer span.End()
|
||||
ctx, client, err := ds.getClientToDistributeRequest(ctx, r.Options.Key.Namespace, "Search")
|
||||
ctx, client, err := ds.getClientToDistributeRequest(ctx, r.Options.Key.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -110,7 +110,7 @@ func (ds *distributorServer) Search(ctx context.Context, r *resourcepb.ResourceS
|
||||
func (ds *distributorServer) GetStats(ctx context.Context, r *resourcepb.ResourceStatsRequest) (*resourcepb.ResourceStatsResponse, error) {
|
||||
ctx, span := ds.tracing.Start(ctx, "distributor.GetStats")
|
||||
defer span.End()
|
||||
ctx, client, err := ds.getClientToDistributeRequest(ctx, r.Namespace, "GetStats")
|
||||
ctx, client, err := ds.getClientToDistributeRequest(ctx, r.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -215,7 +215,7 @@ func (ds *distributorServer) RebuildIndexes(ctx context.Context, r *resourcepb.R
|
||||
func (ds *distributorServer) CountManagedObjects(ctx context.Context, r *resourcepb.CountManagedObjectsRequest) (*resourcepb.CountManagedObjectsResponse, error) {
|
||||
ctx, span := ds.tracing.Start(ctx, "distributor.CountManagedObjects")
|
||||
defer span.End()
|
||||
ctx, client, err := ds.getClientToDistributeRequest(ctx, r.Namespace, "CountManagedObjects")
|
||||
ctx, client, err := ds.getClientToDistributeRequest(ctx, r.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -226,7 +226,7 @@ func (ds *distributorServer) CountManagedObjects(ctx context.Context, r *resourc
|
||||
func (ds *distributorServer) ListManagedObjects(ctx context.Context, r *resourcepb.ListManagedObjectsRequest) (*resourcepb.ListManagedObjectsResponse, error) {
|
||||
ctx, span := ds.tracing.Start(ctx, "distributor.ListManagedObjects")
|
||||
defer span.End()
|
||||
ctx, client, err := ds.getClientToDistributeRequest(ctx, r.Namespace, "ListManagedObjects")
|
||||
ctx, client, err := ds.getClientToDistributeRequest(ctx, r.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -234,7 +234,7 @@ func (ds *distributorServer) ListManagedObjects(ctx context.Context, r *resource
|
||||
return client.ListManagedObjects(ctx, r)
|
||||
}
|
||||
|
||||
func (ds *distributorServer) getClientToDistributeRequest(ctx context.Context, namespace string, methodName string) (context.Context, ResourceClient, error) {
|
||||
func (ds *distributorServer) getClientToDistributeRequest(ctx context.Context, namespace string) (context.Context, SearchClient, error) {
|
||||
ringHasher := fnv.New32a()
|
||||
_, err := ringHasher.Write([]byte(namespace))
|
||||
if err != nil {
|
||||
|
||||
@@ -34,12 +34,17 @@ import (
|
||||
|
||||
var tracer = otel.Tracer("github.com/grafana/grafana/pkg/storage/unified/resource")
|
||||
|
||||
type SearchServer interface {
|
||||
LifecycleHooks
|
||||
|
||||
resourcepb.ResourceIndexServer
|
||||
resourcepb.ManagedObjectIndexServer
|
||||
}
|
||||
|
||||
// ResourceServer implements all gRPC services
|
||||
type ResourceServer interface {
|
||||
resourcepb.ResourceStoreServer
|
||||
resourcepb.BulkStoreServer
|
||||
resourcepb.ResourceIndexServer
|
||||
resourcepb.ManagedObjectIndexServer
|
||||
resourcepb.BlobStoreServer
|
||||
resourcepb.DiagnosticsServer
|
||||
resourcepb.QuotasServer
|
||||
@@ -222,7 +227,8 @@ type ResourceServerOptions struct {
|
||||
Blob BlobConfig
|
||||
|
||||
// Search options
|
||||
Search SearchOptions
|
||||
SearchOptions SearchOptions // TODO: needed?
|
||||
Search SearchServer
|
||||
|
||||
// Quota service
|
||||
OverridesService *OverridesService
|
||||
@@ -251,16 +257,12 @@ type ResourceServerOptions struct {
|
||||
|
||||
storageMetrics *StorageMetrics
|
||||
|
||||
IndexMetrics *BleveIndexMetrics
|
||||
|
||||
// MaxPageSizeBytes is the maximum size of a page in bytes.
|
||||
MaxPageSizeBytes int
|
||||
|
||||
// QOSQueue is the quality of service queue used to enqueue
|
||||
QOSQueue QOSEnqueuer
|
||||
QOSConfig QueueConfig
|
||||
|
||||
OwnsIndexFn func(key NamespacedResource) (bool, error)
|
||||
}
|
||||
|
||||
func NewResourceServer(opts ResourceServerOptions) (*server, error) {
|
||||
@@ -343,23 +345,25 @@ func NewResourceServer(opts ResourceServerOptions) (*server, error) {
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
storageMetrics: opts.storageMetrics,
|
||||
indexMetrics: opts.IndexMetrics,
|
||||
maxPageSizeBytes: opts.MaxPageSizeBytes,
|
||||
reg: opts.Reg,
|
||||
queue: opts.QOSQueue,
|
||||
queueConfig: opts.QOSConfig,
|
||||
overridesService: opts.OverridesService,
|
||||
search: opts.Search,
|
||||
|
||||
artificialSuccessfulWriteDelay: opts.Search.IndexMinUpdateInterval,
|
||||
artificialSuccessfulWriteDelay: opts.SearchOptions.IndexMinUpdateInterval,
|
||||
}
|
||||
|
||||
if opts.Search.Resources != nil {
|
||||
var err error
|
||||
s.search, err = newSearchSupport(opts.Search, s.backend, s.access, s.blob, opts.IndexMetrics, opts.OwnsIndexFn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
/*
|
||||
if opts.Search.Resources != nil {
|
||||
var err error
|
||||
s.search, err = newSearchSupport(opts.Search, s.backend, s.access, s.blob, opts.IndexMetrics, opts.OwnsIndexFn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
err := s.Init(ctx)
|
||||
if err != nil {
|
||||
@@ -377,7 +381,7 @@ type server struct {
|
||||
backend StorageBackend
|
||||
blob BlobSupport
|
||||
secure secrets.InlineSecureValueSupport
|
||||
search *searchSupport
|
||||
search SearchServer
|
||||
diagnostics resourcepb.DiagnosticsServer
|
||||
access claims.AccessClient
|
||||
writeHooks WriteAccessHooks
|
||||
@@ -424,11 +428,6 @@ func (s *server) Init(ctx context.Context) error {
|
||||
s.initErr = s.overridesService.init(ctx)
|
||||
}
|
||||
|
||||
// initialize the search index
|
||||
if s.initErr == nil && s.search != nil {
|
||||
s.initErr = s.search.init(ctx)
|
||||
}
|
||||
|
||||
// Start watching for changes
|
||||
if s.initErr == nil {
|
||||
s.initErr = s.initWatcher()
|
||||
@@ -453,10 +452,6 @@ func (s *server) Stop(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
if s.search != nil {
|
||||
s.search.stop()
|
||||
}
|
||||
|
||||
if s.overridesService != nil {
|
||||
if err := s.overridesService.stop(ctx); err != nil {
|
||||
stopFailed = true
|
||||
@@ -1372,47 +1367,6 @@ func (s *server) Watch(req *resourcepb.WatchRequest, srv resourcepb.ResourceStor
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) Search(ctx context.Context, req *resourcepb.ResourceSearchRequest) (*resourcepb.ResourceSearchResponse, error) {
|
||||
if s.search == nil {
|
||||
return nil, fmt.Errorf("search index not configured")
|
||||
}
|
||||
|
||||
return s.search.Search(ctx, req)
|
||||
}
|
||||
|
||||
// GetStats implements ResourceServer.
|
||||
func (s *server) GetStats(ctx context.Context, req *resourcepb.ResourceStatsRequest) (*resourcepb.ResourceStatsResponse, error) {
|
||||
if err := s.Init(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if s.search == nil {
|
||||
// If the backend implements "GetStats", we can use it
|
||||
srv, ok := s.backend.(resourcepb.ResourceIndexServer)
|
||||
if ok {
|
||||
return srv.GetStats(ctx, req)
|
||||
}
|
||||
return nil, fmt.Errorf("search index not configured")
|
||||
}
|
||||
return s.search.GetStats(ctx, req)
|
||||
}
|
||||
|
||||
func (s *server) ListManagedObjects(ctx context.Context, req *resourcepb.ListManagedObjectsRequest) (*resourcepb.ListManagedObjectsResponse, error) {
|
||||
if s.search == nil {
|
||||
return nil, fmt.Errorf("search index not configured")
|
||||
}
|
||||
|
||||
return s.search.ListManagedObjects(ctx, req)
|
||||
}
|
||||
|
||||
func (s *server) CountManagedObjects(ctx context.Context, req *resourcepb.CountManagedObjectsRequest) (*resourcepb.CountManagedObjectsResponse, error) {
|
||||
if s.search == nil {
|
||||
return nil, fmt.Errorf("search index not configured")
|
||||
}
|
||||
|
||||
return s.search.CountManagedObjects(ctx, req)
|
||||
}
|
||||
|
||||
// IsHealthy implements ResourceServer.
|
||||
func (s *server) IsHealthy(ctx context.Context, req *resourcepb.HealthCheckRequest) (*resourcepb.HealthCheckResponse, error) {
|
||||
return s.diagnostics.IsHealthy(ctx, req)
|
||||
|
||||
@@ -1253,21 +1253,23 @@ func (b *bleveIndex) toBleveSearchRequest(ctx context.Context, req *resourcepb.R
|
||||
queryExact.SetField(resource.SEARCH_FIELD_TITLE)
|
||||
queryExact.Analyzer = keyword.Name // don't analyze the query input - treat it as a single token
|
||||
queryExact.Operator = query.MatchQueryOperatorAnd // This doesn't make a difference for keyword analyzer, we add it just to be explicit.
|
||||
searchQuery := bleve.NewDisjunctionQuery(queryExact)
|
||||
|
||||
// Query 2: Phrase query with standard analyzer
|
||||
queryPhrase := bleve.NewMatchPhraseQuery(req.Query)
|
||||
queryPhrase.SetBoost(5.0)
|
||||
queryPhrase.SetField(resource.SEARCH_FIELD_TITLE)
|
||||
queryPhrase.Analyzer = standard.Name
|
||||
searchQuery.AddQuery(queryPhrase)
|
||||
|
||||
// Query 3: Match query with standard analyzer
|
||||
queryAnalyzed := bleve.NewMatchQuery(removeSmallTerms(req.Query))
|
||||
queryAnalyzed.SetField(resource.SEARCH_FIELD_TITLE)
|
||||
queryAnalyzed.SetBoost(2.0)
|
||||
queryAnalyzed.Analyzer = standard.Name
|
||||
queryAnalyzed.Operator = query.MatchQueryOperatorAnd // Make sure all terms from the query are matched
|
||||
searchQuery.AddQuery(queryAnalyzed)
|
||||
|
||||
// At least one of the queries must match
|
||||
searchQuery := bleve.NewDisjunctionQuery(queryExact, queryAnalyzed, queryPhrase)
|
||||
queries = append(queries, searchQuery)
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
"go.uber.org/goleak"
|
||||
|
||||
authlib "github.com/grafana/authlib/types"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/identity"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sort"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
@@ -18,6 +19,7 @@ import (
|
||||
const DASHBOARD_SCHEMA_VERSION = "schema_version"
|
||||
const DASHBOARD_LINK_COUNT = "link_count"
|
||||
const DASHBOARD_PANEL_TYPES = "panel_types"
|
||||
const DASHBOARD_PANEL_TITLE = "panel_title"
|
||||
const DASHBOARD_DS_TYPES = "ds_types"
|
||||
const DASHBOARD_TRANSFORMATIONS = "transformation"
|
||||
const DASHBOARD_LIBRARY_PANEL_REFERENCE = "reference.LibraryPanel"
|
||||
@@ -53,11 +55,21 @@ func DashboardBuilder(namespaced resource.NamespacedDocumentSupplier) (resource.
|
||||
Type: resourcepb.ResourceTableColumnDefinition_INT32,
|
||||
Description: "How many links appear on the page",
|
||||
},
|
||||
{
|
||||
Name: DASHBOARD_PANEL_TITLE,
|
||||
Type: resourcepb.ResourceTableColumnDefinition_STRING,
|
||||
IsArray: true,
|
||||
Description: "The panel title text",
|
||||
Properties: &resourcepb.ResourceTableColumnDefinition_Properties{
|
||||
Filterable: false, // full text
|
||||
FreeText: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: DASHBOARD_PANEL_TYPES,
|
||||
Type: resourcepb.ResourceTableColumnDefinition_STRING,
|
||||
IsArray: true,
|
||||
Description: "How many links appear on the page",
|
||||
Description: "The panel types used in this dashboard",
|
||||
Properties: &resourcepb.ResourceTableColumnDefinition_Properties{
|
||||
Filterable: true,
|
||||
},
|
||||
@@ -269,14 +281,22 @@ func (s *DashboardDocumentBuilder) BuildDocument(ctx context.Context, key *resou
|
||||
doc.Description = summary.Description
|
||||
doc.Tags = summary.Tags
|
||||
|
||||
panelTitles := []string{}
|
||||
panelTypes := []string{}
|
||||
transformations := []string{}
|
||||
dsTypes := []string{}
|
||||
|
||||
for _, p := range summary.Panels {
|
||||
if p.Type != "" {
|
||||
for p := range summary.PanelIterator() {
|
||||
switch p.Type {
|
||||
case "": // ignore
|
||||
case "row": // row should map to a layout type when we support v2 constructs
|
||||
default:
|
||||
panelTypes = append(panelTypes, p.Type)
|
||||
}
|
||||
|
||||
if len(p.Title) > 0 {
|
||||
panelTitles = append(panelTitles, p.Title)
|
||||
}
|
||||
if len(p.Transformer) > 0 {
|
||||
transformations = append(transformations, p.Transformer...)
|
||||
}
|
||||
@@ -309,17 +329,20 @@ func (s *DashboardDocumentBuilder) BuildDocument(ctx context.Context, key *resou
|
||||
resource.SEARCH_FIELD_LEGACY_ID: summary.ID,
|
||||
}
|
||||
|
||||
if len(panelTitles) > 0 {
|
||||
doc.Fields[DASHBOARD_PANEL_TITLE] = panelTitles
|
||||
}
|
||||
if len(panelTypes) > 0 {
|
||||
sort.Strings(panelTypes)
|
||||
doc.Fields[DASHBOARD_PANEL_TYPES] = panelTypes
|
||||
doc.Fields[DASHBOARD_PANEL_TYPES] = slices.Compact(panelTypes) // distinct values
|
||||
}
|
||||
if len(dsTypes) > 0 {
|
||||
sort.Strings(dsTypes)
|
||||
doc.Fields[DASHBOARD_DS_TYPES] = dsTypes
|
||||
doc.Fields[DASHBOARD_DS_TYPES] = slices.Compact(dsTypes) // distinct values
|
||||
}
|
||||
if len(transformations) > 0 {
|
||||
sort.Strings(transformations)
|
||||
doc.Fields[DASHBOARD_TRANSFORMATIONS] = transformations
|
||||
doc.Fields[DASHBOARD_TRANSFORMATIONS] = slices.Compact(transformations) // distinct values
|
||||
}
|
||||
|
||||
for k, v := range s.Stats[summary.UID] {
|
||||
|
||||
@@ -32,10 +32,16 @@
|
||||
"errors_last_7_days": 1,
|
||||
"grafana.app/deprecatedInternalID": 141,
|
||||
"link_count": 0,
|
||||
"panel_title": [
|
||||
"green pie",
|
||||
"red pie",
|
||||
"blue pie",
|
||||
"collapsed row"
|
||||
],
|
||||
"panel_types": [
|
||||
"barchart",
|
||||
"graph",
|
||||
"row"
|
||||
"pie"
|
||||
],
|
||||
"schema_version": 38
|
||||
},
|
||||
@@ -46,6 +52,12 @@
|
||||
"kind": "DataSource",
|
||||
"name": "DSUID"
|
||||
},
|
||||
{
|
||||
"relation": "depends-on",
|
||||
"group": "dashboards.grafana.app",
|
||||
"kind": "LibraryPanel",
|
||||
"name": "l3d2s634-fdgf-75u4-3fg3-67j966ii7jur"
|
||||
},
|
||||
{
|
||||
"relation": "depends-on",
|
||||
"group": "dashboards.grafana.app",
|
||||
|
||||
@@ -67,7 +67,7 @@
|
||||
"name": "red pie",
|
||||
"uid": "e1d5f519-dabd-47c6-9ad7-83d181ce1cee"
|
||||
},
|
||||
"title": "green pie"
|
||||
"title": "red pie"
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
@@ -78,6 +78,14 @@
|
||||
"id": 8,
|
||||
"type": "graph"
|
||||
},
|
||||
{
|
||||
"id": 20,
|
||||
"type": "graph"
|
||||
},
|
||||
{
|
||||
"id": 30,
|
||||
"type": "graph"
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"gridPos": {
|
||||
@@ -101,6 +109,10 @@
|
||||
"uid": "l3d2s634-fdgf-75u4-3fg3-67j966ii7jur"
|
||||
},
|
||||
"title": "blue pie"
|
||||
},
|
||||
{
|
||||
"id": 40,
|
||||
"type": "pie"
|
||||
}
|
||||
],
|
||||
"title": "collapsed row",
|
||||
|
||||
@@ -19,7 +19,7 @@ func NewSearchOptions(
|
||||
ownsIndexFn func(key resource.NamespacedResource) (bool, error),
|
||||
) (resource.SearchOptions, error) {
|
||||
//nolint:staticcheck // not yet migrated to OpenFeature
|
||||
if cfg.EnableSearch || features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorageSearch) || features.IsEnabledGlobally(featuremgmt.FlagProvisioning) {
|
||||
if cfg.EnableSearch || features.IsEnabledGlobally(featuremgmt.FlagProvisioning) {
|
||||
root := cfg.IndexPath
|
||||
if root == "" {
|
||||
root = filepath.Join(cfg.DataPath, "unified-search", "bleve")
|
||||
|
||||
@@ -71,11 +71,18 @@
|
||||
"description": "How many links appear on the page",
|
||||
"priority": 0
|
||||
},
|
||||
{
|
||||
"name": "panel_title",
|
||||
"type": "string",
|
||||
"format": "",
|
||||
"description": "The panel title text",
|
||||
"priority": 0
|
||||
},
|
||||
{
|
||||
"name": "panel_types",
|
||||
"type": "string",
|
||||
"format": "",
|
||||
"description": "How many links appear on the page",
|
||||
"description": "The panel types used in this dashboard",
|
||||
"priority": 0
|
||||
},
|
||||
{
|
||||
@@ -214,6 +221,7 @@
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null
|
||||
],
|
||||
"object": {
|
||||
@@ -239,6 +247,7 @@
|
||||
"repo",
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
[
|
||||
"timeseries"
|
||||
],
|
||||
@@ -282,6 +291,7 @@
|
||||
"repo",
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
[
|
||||
"timeseries",
|
||||
"table"
|
||||
|
||||
83
pkg/storage/unified/sql/remote_search.go
Normal file
83
pkg/storage/unified/sql/remote_search.go
Normal file
@@ -0,0 +1,83 @@
|
||||
package sql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
|
||||
)
|
||||
|
||||
var _ resource.SearchServer = (*remoteSearchClient)(nil)
|
||||
|
||||
// remoteSearchClient wraps gRPC search clients to implement the SearchServer interface.
|
||||
// This allows the storage server to delegate search operations to a remote search server.
|
||||
type remoteSearchClient struct {
|
||||
conn *grpc.ClientConn
|
||||
index resourcepb.ResourceIndexClient
|
||||
moiClient resourcepb.ManagedObjectIndexClient
|
||||
}
|
||||
|
||||
// newRemoteSearchClient creates a new remote search client that connects to a search server at the given address.
|
||||
func newRemoteSearchClient(address string) (*remoteSearchClient, error) {
|
||||
if address == "" {
|
||||
return nil, fmt.Errorf("search server address is required for remote search mode")
|
||||
}
|
||||
|
||||
conn, err := grpc.NewClient(address,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create gRPC connection to search server: %w", err)
|
||||
}
|
||||
|
||||
return &remoteSearchClient{
|
||||
conn: conn,
|
||||
index: resourcepb.NewResourceIndexClient(conn),
|
||||
moiClient: resourcepb.NewManagedObjectIndexClient(conn),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Init implements resource.LifecycleHooks.
|
||||
// For remote search, there's nothing to initialize locally.
|
||||
func (r *remoteSearchClient) Init(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop implements resource.LifecycleHooks.
|
||||
// Closes the gRPC connection.
|
||||
func (r *remoteSearchClient) Stop(ctx context.Context) error {
|
||||
if r.conn != nil {
|
||||
return r.conn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Search implements resourcepb.ResourceIndexServer.
|
||||
func (r *remoteSearchClient) Search(ctx context.Context, req *resourcepb.ResourceSearchRequest) (*resourcepb.ResourceSearchResponse, error) {
|
||||
return r.index.Search(ctx, req)
|
||||
}
|
||||
|
||||
// GetStats implements resourcepb.ResourceIndexServer.
|
||||
func (r *remoteSearchClient) GetStats(ctx context.Context, req *resourcepb.ResourceStatsRequest) (*resourcepb.ResourceStatsResponse, error) {
|
||||
return r.index.GetStats(ctx, req)
|
||||
}
|
||||
|
||||
// RebuildIndexes implements resourcepb.ResourceIndexServer.
|
||||
func (r *remoteSearchClient) RebuildIndexes(ctx context.Context, req *resourcepb.RebuildIndexesRequest) (*resourcepb.RebuildIndexesResponse, error) {
|
||||
return r.index.RebuildIndexes(ctx, req)
|
||||
}
|
||||
|
||||
// CountManagedObjects implements resourcepb.ManagedObjectIndexServer.
|
||||
func (r *remoteSearchClient) CountManagedObjects(ctx context.Context, req *resourcepb.CountManagedObjectsRequest) (*resourcepb.CountManagedObjectsResponse, error) {
|
||||
return r.moiClient.CountManagedObjects(ctx, req)
|
||||
}
|
||||
|
||||
// ListManagedObjects implements resourcepb.ManagedObjectIndexServer.
|
||||
func (r *remoteSearchClient) ListManagedObjects(ctx context.Context, req *resourcepb.ListManagedObjectsRequest) (*resourcepb.ListManagedObjectsResponse, error) {
|
||||
return r.moiClient.ListManagedObjects(ctx, req)
|
||||
}
|
||||
353
pkg/storage/unified/sql/search_service.go
Normal file
353
pkg/storage/unified/sql/search_service.go
Normal file
@@ -0,0 +1,353 @@
|
||||
package sql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"google.golang.org/grpc/health/grpc_health_v1"
|
||||
|
||||
"github.com/grafana/dskit/kv"
|
||||
"github.com/grafana/dskit/netutil"
|
||||
"github.com/grafana/dskit/ring"
|
||||
"github.com/grafana/dskit/services"
|
||||
|
||||
infraDB "github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/modules"
|
||||
"github.com/grafana/grafana/pkg/services/authz"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/services/grpcserver"
|
||||
"github.com/grafana/grafana/pkg/services/grpcserver/interceptors"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource/grpc"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/search"
|
||||
)
|
||||
|
||||
var (
|
||||
_ SearchGrpcService = (*searchService)(nil)
|
||||
)
|
||||
|
||||
// SearchGrpcService is the interface for the standalone search gRPC service.
|
||||
type SearchGrpcService interface {
|
||||
services.NamedService
|
||||
|
||||
// GetAddress returns the address where this service is running
|
||||
GetAddress() string
|
||||
}
|
||||
|
||||
type searchService struct {
|
||||
*services.BasicService
|
||||
|
||||
// Subservices manager
|
||||
subservices *services.Manager
|
||||
subservicesWatcher *services.FailureWatcher
|
||||
hasSubservices bool
|
||||
|
||||
cfg *setting.Cfg
|
||||
features featuremgmt.FeatureToggles
|
||||
db infraDB.DB
|
||||
stopCh chan struct{}
|
||||
stoppedCh chan error
|
||||
|
||||
handler grpcserver.Provider
|
||||
|
||||
tracing trace.Tracer
|
||||
|
||||
authenticator func(ctx context.Context) (context.Context, error)
|
||||
|
||||
log log.Logger
|
||||
reg prometheus.Registerer
|
||||
indexMetrics *resource.BleveIndexMetrics
|
||||
|
||||
docBuilders resource.DocumentBuilderSupplier
|
||||
|
||||
searchRing *ring.Ring
|
||||
ringLifecycler *ring.BasicLifecycler
|
||||
|
||||
backend resource.StorageBackend
|
||||
}
|
||||
|
||||
// ProvideSearchGrpcService creates a standalone search gRPC service.
|
||||
// This is used when running search-server as a separate target.
|
||||
func ProvideSearchGrpcService(
|
||||
cfg *setting.Cfg,
|
||||
features featuremgmt.FeatureToggles,
|
||||
db infraDB.DB,
|
||||
log log.Logger,
|
||||
reg prometheus.Registerer,
|
||||
docBuilders resource.DocumentBuilderSupplier,
|
||||
indexMetrics *resource.BleveIndexMetrics,
|
||||
searchRing *ring.Ring,
|
||||
memberlistKVConfig kv.Config,
|
||||
backend resource.StorageBackend,
|
||||
) (SearchGrpcService, error) {
|
||||
tracer := otel.Tracer("search-server")
|
||||
|
||||
authn := NewAuthenticatorWithFallback(cfg, reg, tracer, func(ctx context.Context) (context.Context, error) {
|
||||
auth := grpc.Authenticator{Tracer: tracer}
|
||||
return auth.Authenticate(ctx)
|
||||
})
|
||||
|
||||
s := &searchService{
|
||||
cfg: cfg,
|
||||
features: features,
|
||||
stopCh: make(chan struct{}),
|
||||
stoppedCh: make(chan error, 1),
|
||||
authenticator: authn,
|
||||
tracing: tracer,
|
||||
db: db,
|
||||
log: log,
|
||||
reg: reg,
|
||||
docBuilders: docBuilders,
|
||||
indexMetrics: indexMetrics,
|
||||
searchRing: searchRing,
|
||||
subservicesWatcher: services.NewFailureWatcher(),
|
||||
backend: backend,
|
||||
}
|
||||
|
||||
subservices := []services.Service{}
|
||||
if cfg.EnableSharding {
|
||||
ringStore, err := kv.NewClient(
|
||||
memberlistKVConfig,
|
||||
ring.GetCodec(),
|
||||
kv.RegistererWithKVName(reg, resource.RingName),
|
||||
log,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create KV store client: %s", err)
|
||||
}
|
||||
|
||||
lifecyclerCfg, err := toSearchLifecyclerConfig(cfg, log)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize search-ring lifecycler config: %s", err)
|
||||
}
|
||||
|
||||
delegate := ring.BasicLifecyclerDelegate(ring.NewInstanceRegisterDelegate(ring.JOINING, resource.RingNumTokens))
|
||||
delegate = ring.NewLeaveOnStoppingDelegate(delegate, log)
|
||||
delegate = ring.NewAutoForgetDelegate(resource.RingHeartbeatTimeout*2, delegate, log)
|
||||
|
||||
s.ringLifecycler, err = ring.NewBasicLifecycler(
|
||||
lifecyclerCfg,
|
||||
resource.RingName,
|
||||
resource.RingKey,
|
||||
ringStore,
|
||||
delegate,
|
||||
log,
|
||||
reg,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize search-ring lifecycler: %s", err)
|
||||
}
|
||||
|
||||
s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(true)
|
||||
subservices = append(subservices, s.ringLifecycler)
|
||||
}
|
||||
|
||||
if len(subservices) > 0 {
|
||||
s.hasSubservices = true
|
||||
var err error
|
||||
s.subservices, err = services.NewManager(subservices...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create subservices manager: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
s.BasicService = services.NewBasicService(s.starting, s.running, s.stopping).WithName(modules.SearchServer)
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *searchService) OwnsIndex(key resource.NamespacedResource) (bool, error) {
|
||||
if s.searchRing == nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if st := s.searchRing.State(); st != services.Running {
|
||||
return false, fmt.Errorf("ring is not Running: %s", st)
|
||||
}
|
||||
|
||||
ringHasher := fnv.New32a()
|
||||
_, err := ringHasher.Write([]byte(key.Namespace))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error hashing namespace: %w", err)
|
||||
}
|
||||
|
||||
rs, err := s.searchRing.GetWithOptions(ringHasher.Sum32(), searchOwnerRead, ring.WithReplicationFactor(s.searchRing.ReplicationFactor()))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error getting replicaset from ring: %w", err)
|
||||
}
|
||||
|
||||
return rs.Includes(s.ringLifecycler.GetInstanceAddr()), nil
|
||||
}
|
||||
|
||||
func (s *searchService) starting(ctx context.Context) error {
|
||||
if s.hasSubservices {
|
||||
s.subservicesWatcher.WatchManager(s.subservices)
|
||||
if err := services.StartManagerAndAwaitHealthy(ctx, s.subservices); err != nil {
|
||||
return fmt.Errorf("failed to start subservices: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
authzClient, err := authz.ProvideStandaloneAuthZClient(s.cfg, s.features, s.tracing, s.reg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
searchOptions, err := search.NewSearchOptions(s.features, s.cfg, s.docBuilders, s.indexMetrics, s.OwnsIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create search server
|
||||
searchServer, err := resource.NewSearchServer(searchOptions, s.backend, authzClient, nil, s.indexMetrics, s.OwnsIndex)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create search server: %w", err)
|
||||
}
|
||||
|
||||
if err := searchServer.Init(ctx); err != nil {
|
||||
return fmt.Errorf("failed to initialize search server: %w", err)
|
||||
}
|
||||
|
||||
s.handler, err = grpcserver.ProvideService(s.cfg, s.features, interceptors.AuthenticatorFunc(s.authenticator), s.tracing, prometheus.DefaultRegisterer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srv := s.handler.GetServer()
|
||||
resourcepb.RegisterResourceIndexServer(srv, searchServer)
|
||||
resourcepb.RegisterManagedObjectIndexServer(srv, searchServer)
|
||||
grpc_health_v1.RegisterHealthServer(srv, &searchHealthService{searchServer: searchServer})
|
||||
|
||||
// register reflection service
|
||||
_, err = grpcserver.ProvideReflectionService(s.cfg, s.handler)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.cfg.EnableSharding {
|
||||
s.log.Info("waiting until search server is JOINING in the ring")
|
||||
lfcCtx, cancel := context.WithTimeout(context.Background(), s.cfg.ResourceServerJoinRingTimeout)
|
||||
defer cancel()
|
||||
if err := ring.WaitInstanceState(lfcCtx, s.searchRing, s.ringLifecycler.GetInstanceID(), ring.JOINING); err != nil {
|
||||
return fmt.Errorf("error switching to JOINING in the ring: %s", err)
|
||||
}
|
||||
s.log.Info("search server is JOINING in the ring")
|
||||
|
||||
if err := s.ringLifecycler.ChangeState(ctx, ring.ACTIVE); err != nil {
|
||||
return fmt.Errorf("error switching to ACTIVE in the ring: %s", err)
|
||||
}
|
||||
s.log.Info("search server is ACTIVE in the ring")
|
||||
}
|
||||
|
||||
// start the gRPC server
|
||||
go func() {
|
||||
err := s.handler.Run(ctx)
|
||||
if err != nil {
|
||||
s.stoppedCh <- err
|
||||
} else {
|
||||
s.stoppedCh <- nil
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *searchService) GetAddress() string {
|
||||
return s.handler.GetAddress()
|
||||
}
|
||||
|
||||
func (s *searchService) running(ctx context.Context) error {
|
||||
select {
|
||||
case err := <-s.stoppedCh:
|
||||
if err != nil && !errors.Is(err, context.Canceled) {
|
||||
return err
|
||||
}
|
||||
case err := <-s.subservicesWatcher.Chan():
|
||||
return fmt.Errorf("subservice failure: %w", err)
|
||||
case <-ctx.Done():
|
||||
close(s.stopCh)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *searchService) stopping(_ error) error {
|
||||
if s.hasSubservices {
|
||||
err := services.StopManagerAndAwaitStopped(context.Background(), s.subservices)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stop subservices: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func toSearchLifecyclerConfig(cfg *setting.Cfg, logger log.Logger) (ring.BasicLifecyclerConfig, error) {
|
||||
instanceAddr, err := ring.GetInstanceAddr(cfg.MemberlistBindAddr, netutil.PrivateNetworkInterfacesWithFallback([]string{"eth0", "en0"}, logger), logger, true)
|
||||
if err != nil {
|
||||
return ring.BasicLifecyclerConfig{}, err
|
||||
}
|
||||
|
||||
instanceId := cfg.InstanceID
|
||||
if instanceId == "" {
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
return ring.BasicLifecyclerConfig{}, err
|
||||
}
|
||||
instanceId = hostname
|
||||
}
|
||||
|
||||
_, grpcPortStr, err := net.SplitHostPort(cfg.GRPCServer.Address)
|
||||
if err != nil {
|
||||
return ring.BasicLifecyclerConfig{}, fmt.Errorf("could not get grpc port from grpc server address: %s", err)
|
||||
}
|
||||
|
||||
grpcPort, err := strconv.Atoi(grpcPortStr)
|
||||
if err != nil {
|
||||
return ring.BasicLifecyclerConfig{}, fmt.Errorf("error converting grpc address port to int: %s", err)
|
||||
}
|
||||
|
||||
return ring.BasicLifecyclerConfig{
|
||||
Addr: fmt.Sprintf("%s:%d", instanceAddr, grpcPort),
|
||||
ID: instanceId,
|
||||
HeartbeatPeriod: 15 * time.Second,
|
||||
HeartbeatTimeout: resource.RingHeartbeatTimeout,
|
||||
TokensObservePeriod: 0,
|
||||
NumTokens: resource.RingNumTokens,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// searchHealthService implements the health check for the search service.
|
||||
type searchHealthService struct {
|
||||
searchServer resource.SearchServer
|
||||
}
|
||||
|
||||
func (h *searchHealthService) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {
|
||||
return &grpc_health_v1.HealthCheckResponse{
|
||||
Status: grpc_health_v1.HealthCheckResponse_SERVING,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *searchHealthService) Watch(req *grpc_health_v1.HealthCheckRequest, server grpc_health_v1.Health_WatchServer) error {
|
||||
return fmt.Errorf("watch not implemented")
|
||||
}
|
||||
|
||||
func (h *searchHealthService) List(ctx context.Context, req *grpc_health_v1.HealthListRequest) (*grpc_health_v1.HealthListResponse, error) {
|
||||
check, err := h.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &grpc_health_v1.HealthListResponse{
|
||||
Statuses: map[string]*grpc_health_v1.HealthCheckResponse{
|
||||
"": check,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
@@ -48,7 +48,7 @@ type ServerOptions struct {
|
||||
OwnsIndexFn func(key resource.NamespacedResource) (bool, error)
|
||||
}
|
||||
|
||||
func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
|
||||
func NewResourceServer(opts ServerOptions) (resource.ResourceServer, resource.SearchServer, error) {
|
||||
apiserverCfg := opts.Cfg.SectionWithEnvOverrides("grafana-apiserver")
|
||||
|
||||
if opts.SecureValues == nil && opts.Cfg != nil && opts.Cfg.SecretsManagement.GrpcClientEnable {
|
||||
@@ -59,7 +59,7 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
|
||||
nil, // not needed for gRPC client mode
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create inline secure value service: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to create inline secure value service: %w", err)
|
||||
}
|
||||
opts.SecureValues = inlineSecureValueService
|
||||
}
|
||||
@@ -79,7 +79,7 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
|
||||
dir := strings.Replace(serverOptions.Blob.URL, "./data", opts.Cfg.DataPath, 1)
|
||||
err := os.MkdirAll(dir, 0700)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
serverOptions.Blob.URL = "file:///" + dir
|
||||
}
|
||||
@@ -96,13 +96,13 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
|
||||
} else {
|
||||
eDB, err := dbimpl.ProvideResourceDB(opts.DB, opts.Cfg, opts.Tracer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if opts.Cfg.EnableSQLKVBackend {
|
||||
sqlkv, err := resource.NewSQLKV(eDB)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating sqlkv: %s", err)
|
||||
return nil, nil, fmt.Errorf("error creating sqlkv: %s", err)
|
||||
}
|
||||
|
||||
kvBackendOpts := resource.KVBackendOptions{
|
||||
@@ -114,12 +114,12 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
|
||||
ctx := context.Background()
|
||||
dbConn, err := eDB.Init(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing DB: %w", err)
|
||||
return nil, nil, fmt.Errorf("error initializing DB: %w", err)
|
||||
}
|
||||
|
||||
dialect := sqltemplate.DialectForDriver(dbConn.DriverName())
|
||||
if dialect == nil {
|
||||
return nil, fmt.Errorf("unsupported database driver: %s", dbConn.DriverName())
|
||||
return nil, nil, fmt.Errorf("unsupported database driver: %s", dbConn.DriverName())
|
||||
}
|
||||
|
||||
rvManager, err := rvmanager.NewResourceVersionManager(rvmanager.ResourceManagerOptions{
|
||||
@@ -127,14 +127,14 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
|
||||
DB: dbConn,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create resource version manager: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to create resource version manager: %w", err)
|
||||
}
|
||||
|
||||
// TODO add config to decide whether to pass RvManager or not
|
||||
kvBackendOpts.RvManager = rvManager
|
||||
kvBackend, err := resource.NewKVStorageBackend(kvBackendOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating kv backend: %s", err)
|
||||
return nil, nil, fmt.Errorf("error creating kv backend: %s", err)
|
||||
}
|
||||
|
||||
serverOptions.Backend = kvBackend
|
||||
@@ -151,7 +151,7 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
|
||||
LastImportTimeMaxAge: opts.SearchOptions.MaxIndexAge, // No need to keep last_import_times older than max index age.
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
serverOptions.Backend = backend
|
||||
serverOptions.Diagnostics = backend
|
||||
@@ -159,13 +159,31 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
|
||||
}
|
||||
}
|
||||
|
||||
serverOptions.Search = opts.SearchOptions
|
||||
serverOptions.IndexMetrics = opts.IndexMetrics
|
||||
// Initialize the backend before creating search server (it needs the DB connection)
|
||||
if serverOptions.Lifecycle != nil {
|
||||
if err := serverOptions.Lifecycle.Init(context.Background()); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to initialize backend: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
search, err := resource.NewSearchServer(opts.SearchOptions, serverOptions.Backend, opts.AccessClient, nil, opts.IndexMetrics, opts.OwnsIndexFn)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to initialize search: %w", err)
|
||||
}
|
||||
|
||||
if err := search.Init(context.Background()); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to initialize search: %w", err)
|
||||
}
|
||||
|
||||
serverOptions.Search = search
|
||||
serverOptions.QOSQueue = opts.QOSQueue
|
||||
serverOptions.OwnsIndexFn = opts.OwnsIndexFn
|
||||
serverOptions.OverridesService = opts.OverridesService
|
||||
|
||||
return resource.NewResourceServer(serverOptions)
|
||||
rs, err := resource.NewResourceServer(serverOptions)
|
||||
if err != nil {
|
||||
_ = search.Stop(context.Background())
|
||||
}
|
||||
return rs, search, err
|
||||
}
|
||||
|
||||
// isHighAvailabilityEnabled determines if high availability mode should
|
||||
|
||||
@@ -292,10 +292,50 @@ func (s *service) starting(ctx context.Context) error {
|
||||
serverOptions.OverridesService = overridesSvc
|
||||
}
|
||||
|
||||
server, err := NewResourceServer(serverOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
// Handle search mode: "", "embedded", or "remote"
|
||||
// Empty string defaults to "embedded" for backward compatibility
|
||||
var searchServer resource.SearchServer
|
||||
registerSearchServices := true
|
||||
|
||||
switch s.cfg.SearchMode {
|
||||
case "remote":
|
||||
// Use remote search client - don't register search services locally
|
||||
s.log.Info("Using remote search server", "address", s.cfg.SearchServerAddress)
|
||||
remoteSearch, err := newRemoteSearchClient(s.cfg.SearchServerAddress)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create remote search client: %w", err)
|
||||
}
|
||||
searchServer = remoteSearch
|
||||
registerSearchServices = false
|
||||
|
||||
case "", "embedded":
|
||||
// Default: create local search server (backward compatible)
|
||||
s.log.Info("Using embedded search server")
|
||||
// SearchOptions are already configured, NewResourceServer will create the search server
|
||||
|
||||
default:
|
||||
return fmt.Errorf("invalid search_mode: %s (valid values: \"\", \"embedded\", \"remote\")", s.cfg.SearchMode)
|
||||
}
|
||||
|
||||
var server resource.ResourceServer
|
||||
if searchServer != nil {
|
||||
// Remote search mode: pass the remote search client to the resource server
|
||||
// Clear local search options since we're using remote search
|
||||
serverOptions.SearchOptions = resource.SearchOptions{}
|
||||
var err error
|
||||
server, _, err = NewResourceServer(serverOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Embedded mode: create both server and search server together
|
||||
var err error
|
||||
server, searchServer, err = NewResourceServer(serverOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
s.handler, err = grpcserver.ProvideService(s.cfg, s.features, interceptors.AuthenticatorFunc(s.authenticator), s.tracing, prometheus.DefaultRegisterer)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -309,8 +349,11 @@ func (s *service) starting(ctx context.Context) error {
|
||||
srv := s.handler.GetServer()
|
||||
resourcepb.RegisterResourceStoreServer(srv, server)
|
||||
resourcepb.RegisterBulkStoreServer(srv, server)
|
||||
resourcepb.RegisterResourceIndexServer(srv, server)
|
||||
resourcepb.RegisterManagedObjectIndexServer(srv, server)
|
||||
// Only register search services if running in embedded mode
|
||||
if registerSearchServices {
|
||||
resourcepb.RegisterResourceIndexServer(srv, searchServer)
|
||||
resourcepb.RegisterManagedObjectIndexServer(srv, searchServer)
|
||||
}
|
||||
resourcepb.RegisterBlobStoreServer(srv, server)
|
||||
resourcepb.RegisterDiagnosticsServer(srv, server)
|
||||
resourcepb.RegisterQuotasServer(srv, server)
|
||||
|
||||
@@ -32,6 +32,7 @@ func RunTestSearchAndStorage(t *testing.T, ctx context.Context, backend resource
|
||||
nsPrefix := "test-ns"
|
||||
|
||||
var server resource.ResourceServer
|
||||
var searchServer resource.SearchServer
|
||||
|
||||
t.Run("Create initial resources in storage", func(t *testing.T) {
|
||||
initialResources := []struct {
|
||||
@@ -96,25 +97,35 @@ func RunTestSearchAndStorage(t *testing.T, ctx context.Context, backend resource
|
||||
})
|
||||
|
||||
t.Run("Create a resource server with both backends", func(t *testing.T) {
|
||||
// Create a resource server with both backends
|
||||
// Create search server first
|
||||
var err error
|
||||
server, err = resource.NewResourceServer(resource.ResourceServerOptions{
|
||||
Backend: backend,
|
||||
Search: resource.SearchOptions{
|
||||
Backend: searchBackend,
|
||||
Resources: &resource.TestDocumentBuilderSupplier{
|
||||
GroupsResources: map[string]string{
|
||||
"test.grafana.app": "testresources",
|
||||
},
|
||||
searchOpts := resource.SearchOptions{
|
||||
Backend: searchBackend,
|
||||
Resources: &resource.TestDocumentBuilderSupplier{
|
||||
GroupsResources: map[string]string{
|
||||
"test.grafana.app": "testresources",
|
||||
},
|
||||
},
|
||||
}
|
||||
searchServer, err = resource.NewSearchServer(searchOpts, backend, nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, searchServer)
|
||||
|
||||
// Initialize the search server
|
||||
err = searchServer.Init(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a resource server with the search server
|
||||
server, err = resource.NewResourceServer(resource.ResourceServerOptions{
|
||||
Backend: backend,
|
||||
Search: searchServer,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Search for initial resources", func(t *testing.T) {
|
||||
// Test 1: Search for initial resources
|
||||
searchResp, err := server.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
searchResp, err := searchServer.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
Options: &resourcepb.ListOptions{
|
||||
Key: &resourcepb.ResourceKey{
|
||||
Group: "test.grafana.app",
|
||||
@@ -194,7 +205,7 @@ func RunTestSearchAndStorage(t *testing.T, ctx context.Context, backend resource
|
||||
})
|
||||
|
||||
t.Run("Search for documents", func(t *testing.T) {
|
||||
searchResp, err := server.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
searchResp, err := searchServer.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
Options: &resourcepb.ListOptions{
|
||||
Key: &resourcepb.ResourceKey{
|
||||
Group: "test.grafana.app",
|
||||
@@ -212,7 +223,7 @@ func RunTestSearchAndStorage(t *testing.T, ctx context.Context, backend resource
|
||||
})
|
||||
|
||||
t.Run("Search with tags", func(t *testing.T) {
|
||||
searchResp, err := server.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
searchResp, err := searchServer.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
Options: &resourcepb.ListOptions{
|
||||
Key: &resourcepb.ResourceKey{
|
||||
Group: "test.grafana.app",
|
||||
@@ -231,7 +242,7 @@ func RunTestSearchAndStorage(t *testing.T, ctx context.Context, backend resource
|
||||
require.Equal(t, int64(0), searchResp.TotalHits)
|
||||
|
||||
// this is the correct way of searching by tag
|
||||
searchResp, err = server.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
searchResp, err = searchServer.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
Options: &resourcepb.ListOptions{
|
||||
Key: &resourcepb.ResourceKey{
|
||||
Group: "test.grafana.app",
|
||||
@@ -253,7 +264,7 @@ func RunTestSearchAndStorage(t *testing.T, ctx context.Context, backend resource
|
||||
})
|
||||
|
||||
t.Run("Search with specific tag", func(t *testing.T) {
|
||||
searchResp, err := server.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
searchResp, err := searchServer.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
Options: &resourcepb.ListOptions{
|
||||
Key: &resourcepb.ResourceKey{
|
||||
Group: "test.grafana.app",
|
||||
@@ -272,7 +283,7 @@ func RunTestSearchAndStorage(t *testing.T, ctx context.Context, backend resource
|
||||
require.Equal(t, int64(0), searchResp.TotalHits)
|
||||
|
||||
// this is the correct way of searching by tag
|
||||
searchResp, err = server.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
searchResp, err = searchServer.Search(ctx, &resourcepb.ResourceSearchRequest{
|
||||
Options: &resourcepb.ListOptions{
|
||||
Key: &resourcepb.ResourceKey{
|
||||
Group: "test.grafana.app",
|
||||
|
||||
@@ -4,10 +4,15 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"math"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
@@ -16,12 +21,167 @@ import (
|
||||
dashboardV0 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/identity"
|
||||
"github.com/grafana/grafana/pkg/apiserver/rest"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/services/dashboards"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/tests/apis"
|
||||
"github.com/grafana/grafana/pkg/tests/testinfra"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
func TestIntegrationSearchDevDashboards(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
ctx := context.Background()
|
||||
|
||||
helper := apis.NewK8sTestHelper(t, testinfra.GrafanaOpts{
|
||||
DisableDataMigrations: true,
|
||||
AppModeProduction: true,
|
||||
DisableAnonymous: true,
|
||||
APIServerStorageType: "unified",
|
||||
UnifiedStorageConfig: map[string]setting.UnifiedStorageConfig{
|
||||
"dashboards.dashboard.grafana.app": {DualWriterMode: rest.Mode5},
|
||||
"folders.folder.grafana.app": {DualWriterMode: rest.Mode5},
|
||||
},
|
||||
UnifiedStorageEnableSearch: true,
|
||||
})
|
||||
defer helper.Shutdown()
|
||||
|
||||
// Create devenv dashboards from legacy API
|
||||
cfg := dynamic.ConfigFor(helper.Org1.Admin.NewRestConfig())
|
||||
cfg.GroupVersion = &dashboardV0.GroupVersion
|
||||
adminClient, err := k8srest.RESTClientFor(cfg)
|
||||
require.NoError(t, err)
|
||||
adminClient.Get()
|
||||
|
||||
fileCount := 0
|
||||
devenv := "../../../../devenv/dev-dashboards/panel-timeseries"
|
||||
err = filepath.WalkDir(devenv, func(p string, d fs.DirEntry, e error) error {
|
||||
require.NoError(t, err)
|
||||
if d.IsDir() || filepath.Ext(d.Name()) != ".json" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// use the filename as UID
|
||||
uid := strings.TrimSuffix(d.Name(), ".json")
|
||||
if len(uid) > 40 {
|
||||
uid = uid[:40] // avoid uid too long, max 40 characters
|
||||
}
|
||||
|
||||
// nolint:gosec
|
||||
data, err := os.ReadFile(p)
|
||||
require.NoError(t, err)
|
||||
|
||||
cmd := dashboards.SaveDashboardCommand{
|
||||
Dashboard: &simplejson.Json{},
|
||||
Overwrite: true,
|
||||
}
|
||||
err = cmd.Dashboard.FromDB(data)
|
||||
require.NoError(t, err)
|
||||
cmd.Dashboard.Set("id", nil)
|
||||
cmd.Dashboard.Set("uid", uid)
|
||||
data, err = json.Marshal(cmd)
|
||||
require.NoError(t, err)
|
||||
|
||||
var statusCode int
|
||||
result := adminClient.Post().AbsPath("api", "dashboards", "db").
|
||||
Body(data).
|
||||
SetHeader("Content-type", "application/json").
|
||||
Do(ctx).
|
||||
StatusCode(&statusCode)
|
||||
require.NoError(t, result.Error(), "file: [%d] %s [status:%d]", fileCount, d.Name(), statusCode)
|
||||
require.Equal(t, int(http.StatusOK), statusCode)
|
||||
fileCount++
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 16, fileCount, "file count from %s", devenv)
|
||||
|
||||
// Helper to call search
|
||||
callSearch := func(user apis.User, params string) dashboardV0.SearchResults {
|
||||
require.NotNil(t, user)
|
||||
ns := user.Identity.GetNamespace()
|
||||
cfg := dynamic.ConfigFor(user.NewRestConfig())
|
||||
cfg.GroupVersion = &dashboardV0.GroupVersion
|
||||
restClient, err := k8srest.RESTClientFor(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
var statusCode int
|
||||
req := restClient.Get().AbsPath("apis", "dashboard.grafana.app", "v0alpha1", "namespaces", ns, "search").
|
||||
Param("limit", "1000").
|
||||
Param("type", "dashboard") // Only search dashboards
|
||||
|
||||
for kv := range strings.SplitSeq(params, "&") {
|
||||
if kv == "" {
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(kv, "=", 2)
|
||||
if len(parts) == 2 {
|
||||
req = req.Param(parts[0], parts[1])
|
||||
}
|
||||
}
|
||||
res := req.Do(ctx).StatusCode(&statusCode)
|
||||
require.NoError(t, res.Error())
|
||||
require.Equal(t, int(http.StatusOK), statusCode)
|
||||
var sr dashboardV0.SearchResults
|
||||
raw, err := res.Raw()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, json.Unmarshal(raw, &sr))
|
||||
|
||||
// Normalize scores and query cost for snapshot comparison
|
||||
sr.QueryCost = 0 // this depends on the hardware
|
||||
sr.MaxScore = roundTo(sr.MaxScore, 3)
|
||||
for i := range sr.Hits {
|
||||
sr.Hits[i].Score = roundTo(sr.Hits[i].Score, 3) // 0.6250571494814442 -> 0.625
|
||||
}
|
||||
return sr
|
||||
}
|
||||
|
||||
// Compare a results to snapshots
|
||||
testCases := []struct {
|
||||
name string
|
||||
user apis.User
|
||||
params string
|
||||
}{
|
||||
{
|
||||
name: "all",
|
||||
user: helper.Org1.Admin,
|
||||
params: "", // only dashboards
|
||||
},
|
||||
{
|
||||
name: "simple-query",
|
||||
user: helper.Org1.Admin,
|
||||
params: "query=stacking",
|
||||
},
|
||||
{
|
||||
name: "with-text-panel",
|
||||
user: helper.Org1.Admin,
|
||||
params: "field=panel_types&panelType=text",
|
||||
},
|
||||
}
|
||||
for i, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
res := callSearch(tc.user, tc.params)
|
||||
jj, err := json.MarshalIndent(res, "", " ")
|
||||
require.NoError(t, err)
|
||||
|
||||
fname := fmt.Sprintf("testdata/searchV0/t%02d-%s.json", i, tc.name)
|
||||
// nolint:gosec
|
||||
snapshot, err := os.ReadFile(fname)
|
||||
if err != nil {
|
||||
assert.Failf(t, "Failed to read snapshot", "file: %s", fname)
|
||||
err = os.WriteFile(fname, jj, 0o644)
|
||||
require.NoErrorf(t, err, "Failed to write snapshot file %s", fname)
|
||||
return
|
||||
}
|
||||
|
||||
if !assert.JSONEq(t, string(snapshot), string(jj)) {
|
||||
err = os.WriteFile(fname, jj, 0o644)
|
||||
require.NoErrorf(t, err, "Failed to write snapshot file %s", fname)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegrationSearchPermissionFiltering(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
@@ -285,3 +445,11 @@ func setFolderPermissions(t *testing.T, helper *apis.K8sTestHelper, actingUser a
|
||||
|
||||
require.Equal(t, http.StatusOK, resp.Response.StatusCode, "Failed to set permissions for folder %s", folderUID)
|
||||
}
|
||||
|
||||
// roundTo rounds a float64 to a specified number of decimal places.
|
||||
func roundTo(n float64, decimals uint32) float64 {
|
||||
// Calculate the power of 10 for the desired number of decimals
|
||||
scale := math.Pow(10, float64(decimals))
|
||||
// Multiply, round to the nearest integer, and then divide back
|
||||
return math.Round(n*scale) / scale
|
||||
}
|
||||
|
||||
165
pkg/tests/apis/dashboard/testdata/searchV0/t00-all.json
vendored
Normal file
165
pkg/tests/apis/dashboard/testdata/searchV0/t00-all.json
vendored
Normal file
@@ -0,0 +1,165 @@
|
||||
{
|
||||
"totalHits": 16,
|
||||
"hits": [
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries",
|
||||
"title": "Panel Tests - Graph NG",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-by-value-color-schemes",
|
||||
"title": "Panel Tests - Graph NG - By value color schemes",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-nulls",
|
||||
"title": "Panel Tests - Graph NG - Discrete panels",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng",
|
||||
"timeseries",
|
||||
"trend",
|
||||
"state-timeline",
|
||||
"transform"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-gradient-area",
|
||||
"title": "Panel Tests - Graph NG - Gradient Area Fills",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-soft-limits",
|
||||
"title": "Panel Tests - Graph NG - softMin/softMax",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-yaxis-ticks",
|
||||
"title": "Panel Tests - Graph NG - Y axis ticks",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-hue-gradients",
|
||||
"title": "Panel Tests - GraphNG - Hue Gradients",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-time",
|
||||
"title": "Panel Tests - GraphNG - Time Axis",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-thresholds",
|
||||
"title": "Panel Tests - GraphNG Thresholds",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-shared-tooltip-cursor-positio",
|
||||
"title": "Panel Tests - shared tooltips cursor positioning",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-bars-high-density",
|
||||
"title": "Panel Tests - TimeSeries - bars high density (stroke + fill)",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-out-of-rage",
|
||||
"title": "Panel Tests - Timeseries - Out of range",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-stacking",
|
||||
"title": "Panel Tests - TimeSeries - stacking",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-formats",
|
||||
"title": "Panel Tests - Timeseries - Supported input formats"
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-stacking2",
|
||||
"title": "TimeSeries \u0026 BarChart Stacking",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-y-ticks-zero-decimals",
|
||||
"title": "Zero Decimals Y Ticks",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
]
|
||||
}
|
||||
],
|
||||
"maxScore": 1
|
||||
}
|
||||
28
pkg/tests/apis/dashboard/testdata/searchV0/t01-simple-query.json
vendored
Normal file
28
pkg/tests/apis/dashboard/testdata/searchV0/t01-simple-query.json
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"totalHits": 2,
|
||||
"hits": [
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-stacking",
|
||||
"title": "Panel Tests - TimeSeries - stacking",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
],
|
||||
"score": 0.658
|
||||
},
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-stacking2",
|
||||
"title": "TimeSeries \u0026 BarChart Stacking",
|
||||
"tags": [
|
||||
"gdev",
|
||||
"panel-tests",
|
||||
"graph-ng"
|
||||
],
|
||||
"score": 0.625
|
||||
}
|
||||
],
|
||||
"maxScore": 0.658
|
||||
}
|
||||
18
pkg/tests/apis/dashboard/testdata/searchV0/t02-with-text-panel.json
vendored
Normal file
18
pkg/tests/apis/dashboard/testdata/searchV0/t02-with-text-panel.json
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"totalHits": 1,
|
||||
"hits": [
|
||||
{
|
||||
"resource": "dashboards",
|
||||
"name": "timeseries-formats",
|
||||
"title": "Panel Tests - Timeseries - Supported input formats",
|
||||
"field": {
|
||||
"panel_types": [
|
||||
"table",
|
||||
"text",
|
||||
"timeseries"
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"maxScore": 1.778
|
||||
}
|
||||
@@ -2054,9 +2054,7 @@ func TestIntegrationDeleteFolderWithProvisionedDashboards(t *testing.T) {
|
||||
DualWriterMode: modeDw,
|
||||
},
|
||||
},
|
||||
EnableFeatureToggles: []string{
|
||||
featuremgmt.FlagUnifiedStorageSearch,
|
||||
},
|
||||
UnifiedStorageEnableSearch: true,
|
||||
}
|
||||
|
||||
setupProvisioningDir(t, &ops)
|
||||
@@ -2163,9 +2161,7 @@ func TestIntegrationProvisionedFolderPropagatesLabelsAndAnnotations(t *testing.T
|
||||
DualWriterMode: mode3,
|
||||
},
|
||||
},
|
||||
EnableFeatureToggles: []string{
|
||||
featuremgmt.FlagUnifiedStorageSearch,
|
||||
},
|
||||
UnifiedStorageEnableSearch: true,
|
||||
}
|
||||
|
||||
setupProvisioningDir(t, &ops)
|
||||
|
||||
@@ -1830,6 +1830,22 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "panelType",
|
||||
"in": "query",
|
||||
"description": "find dashboards using panels of a given plugin type",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "dataSourceType",
|
||||
"in": "query",
|
||||
"description": "find dashboards using datasources of a given plugin type",
|
||||
"schema": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "permission",
|
||||
"in": "query",
|
||||
|
||||
@@ -559,3 +559,175 @@ func TestIntegrationConnectionController_HealthCheckUpdates(t *testing.T) {
|
||||
assert.True(t, final.Status.Health.Healthy, "connection should remain healthy")
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntegrationProvisioning_RepositoryFieldSelectorByConnection(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
helper := runGrafana(t)
|
||||
ctx := context.Background()
|
||||
createOptions := metav1.CreateOptions{FieldValidation: "Strict"}
|
||||
|
||||
// Create a connection first
|
||||
connection := &unstructured.Unstructured{Object: map[string]any{
|
||||
"apiVersion": "provisioning.grafana.app/v0alpha1",
|
||||
"kind": "Connection",
|
||||
"metadata": map[string]any{
|
||||
"name": "test-conn-for-field-selector",
|
||||
"namespace": "default",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"type": "github",
|
||||
"github": map[string]any{
|
||||
"appID": "123456",
|
||||
"installationID": "789012",
|
||||
},
|
||||
},
|
||||
"secure": map[string]any{
|
||||
"privateKey": map[string]any{
|
||||
"create": "test-private-key",
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
_, err := helper.Connections.Resource.Create(ctx, connection, createOptions)
|
||||
require.NoError(t, err, "failed to create connection")
|
||||
|
||||
t.Cleanup(func() {
|
||||
// Clean up repositories first
|
||||
_ = helper.Repositories.Resource.Delete(ctx, "repo-with-connection", metav1.DeleteOptions{})
|
||||
_ = helper.Repositories.Resource.Delete(ctx, "repo-without-connection", metav1.DeleteOptions{})
|
||||
_ = helper.Repositories.Resource.Delete(ctx, "repo-with-different-connection", metav1.DeleteOptions{})
|
||||
// Then clean up the connection
|
||||
_ = helper.Connections.Resource.Delete(ctx, "test-conn-for-field-selector", metav1.DeleteOptions{})
|
||||
})
|
||||
|
||||
// Create a repository WITH the connection
|
||||
repoWithConnection := &unstructured.Unstructured{Object: map[string]any{
|
||||
"apiVersion": "provisioning.grafana.app/v0alpha1",
|
||||
"kind": "Repository",
|
||||
"metadata": map[string]any{
|
||||
"name": "repo-with-connection",
|
||||
"namespace": "default",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"title": "Repo With Connection",
|
||||
"type": "local",
|
||||
"sync": map[string]any{
|
||||
"enabled": false,
|
||||
"target": "folder",
|
||||
},
|
||||
"local": map[string]any{
|
||||
"path": helper.ProvisioningPath,
|
||||
},
|
||||
"connection": map[string]any{
|
||||
"name": "test-conn-for-field-selector",
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
_, err = helper.Repositories.Resource.Create(ctx, repoWithConnection, createOptions)
|
||||
require.NoError(t, err, "failed to create repository with connection")
|
||||
|
||||
// Create a repository WITHOUT the connection
|
||||
repoWithoutConnection := &unstructured.Unstructured{Object: map[string]any{
|
||||
"apiVersion": "provisioning.grafana.app/v0alpha1",
|
||||
"kind": "Repository",
|
||||
"metadata": map[string]any{
|
||||
"name": "repo-without-connection",
|
||||
"namespace": "default",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"title": "Repo Without Connection",
|
||||
"type": "local",
|
||||
"sync": map[string]any{
|
||||
"enabled": false,
|
||||
"target": "folder",
|
||||
},
|
||||
"local": map[string]any{
|
||||
"path": helper.ProvisioningPath,
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
_, err = helper.Repositories.Resource.Create(ctx, repoWithoutConnection, createOptions)
|
||||
require.NoError(t, err, "failed to create repository without connection")
|
||||
|
||||
// Create a repository with a DIFFERENT connection name (non-existent)
|
||||
repoWithDifferentConnection := &unstructured.Unstructured{Object: map[string]any{
|
||||
"apiVersion": "provisioning.grafana.app/v0alpha1",
|
||||
"kind": "Repository",
|
||||
"metadata": map[string]any{
|
||||
"name": "repo-with-different-connection",
|
||||
"namespace": "default",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"title": "Repo With Different Connection",
|
||||
"type": "local",
|
||||
"sync": map[string]any{
|
||||
"enabled": false,
|
||||
"target": "folder",
|
||||
},
|
||||
"local": map[string]any{
|
||||
"path": helper.ProvisioningPath,
|
||||
},
|
||||
"connection": map[string]any{
|
||||
"name": "some-other-connection",
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
_, err = helper.Repositories.Resource.Create(ctx, repoWithDifferentConnection, createOptions)
|
||||
require.NoError(t, err, "failed to create repository with different connection")
|
||||
|
||||
t.Run("filter repositories by spec.connection.name", func(t *testing.T) {
|
||||
// List repositories with field selector for the specific connection
|
||||
list, err := helper.Repositories.Resource.List(ctx, metav1.ListOptions{
|
||||
FieldSelector: "spec.connection.name=test-conn-for-field-selector",
|
||||
})
|
||||
require.NoError(t, err, "failed to list repositories with field selector")
|
||||
|
||||
// Should only return the repository with the matching connection
|
||||
assert.Len(t, list.Items, 1, "should return exactly one repository")
|
||||
assert.Equal(t, "repo-with-connection", list.Items[0].GetName(), "should return the correct repository")
|
||||
})
|
||||
|
||||
t.Run("filter repositories by non-existent connection returns empty", func(t *testing.T) {
|
||||
// List repositories with field selector for a non-existent connection
|
||||
list, err := helper.Repositories.Resource.List(ctx, metav1.ListOptions{
|
||||
FieldSelector: "spec.connection.name=non-existent-connection",
|
||||
})
|
||||
require.NoError(t, err, "failed to list repositories with field selector")
|
||||
|
||||
// Should return empty list
|
||||
assert.Len(t, list.Items, 0, "should return no repositories for non-existent connection")
|
||||
})
|
||||
|
||||
t.Run("filter repositories by empty connection name", func(t *testing.T) {
|
||||
// List repositories with field selector for empty connection (repos without connection)
|
||||
list, err := helper.Repositories.Resource.List(ctx, metav1.ListOptions{
|
||||
FieldSelector: "spec.connection.name=",
|
||||
})
|
||||
require.NoError(t, err, "failed to list repositories with empty connection field selector")
|
||||
|
||||
// Should return the repository without a connection
|
||||
assert.Len(t, list.Items, 1, "should return exactly one repository without connection")
|
||||
assert.Equal(t, "repo-without-connection", list.Items[0].GetName(), "should return the repository without connection")
|
||||
})
|
||||
|
||||
t.Run("list all repositories without field selector", func(t *testing.T) {
|
||||
// List all repositories without field selector
|
||||
list, err := helper.Repositories.Resource.List(ctx, metav1.ListOptions{})
|
||||
require.NoError(t, err, "failed to list all repositories")
|
||||
|
||||
// Should return all three repositories
|
||||
assert.Len(t, list.Items, 3, "should return all three repositories")
|
||||
|
||||
names := make([]string, len(list.Items))
|
||||
for i, item := range list.Items {
|
||||
names[i] = item.GetName()
|
||||
}
|
||||
assert.Contains(t, names, "repo-with-connection")
|
||||
assert.Contains(t, names, "repo-without-connection")
|
||||
assert.Contains(t, names, "repo-with-different-connection")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { generatedAPI } from '@grafana/api-clients/rtkq/collections/v1alpha1';
|
||||
import { t } from '@grafana/i18n';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { createSuccessNotification, createErrorNotification } from 'app/core/copy/appNotification';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
|
||||
export const collectionsAPIv1alpha1 = generatedAPI.enhanceEndpoints({
|
||||
endpoints: {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import { generatedAPI, type Playlist, type PlaylistSpec } from '@grafana/api-clients/rtkq/playlist/v0alpha1';
|
||||
import { getBackendSrv } from '@grafana/runtime';
|
||||
|
||||
import { notifyApp } from '../../../../core/actions';
|
||||
import { createSuccessNotification } from '../../../../core/copy/appNotification';
|
||||
import { notifyApp } from '../../../../core/reducers/appNotification';
|
||||
import { contextSrv } from '../../../../core/services/context_srv';
|
||||
import { handleError } from '../../../utils';
|
||||
|
||||
|
||||
@@ -12,8 +12,8 @@ import { isFetchError } from '@grafana/runtime';
|
||||
import { clearFolders } from 'app/features/browse-dashboards/state/slice';
|
||||
import { getState } from 'app/store/store';
|
||||
|
||||
import { notifyApp } from '../../../../core/actions';
|
||||
import { createSuccessNotification, createErrorNotification } from '../../../../core/copy/appNotification';
|
||||
import { notifyApp } from '../../../../core/reducers/appNotification';
|
||||
import { PAGE_SIZE } from '../../../../features/browse-dashboards/api/services';
|
||||
import { refetchChildren } from '../../../../features/browse-dashboards/state/actions';
|
||||
import { handleError } from '../../../utils';
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import { normalizeError } from '@grafana/api-clients';
|
||||
import { ThunkDispatch } from 'app/types/store';
|
||||
|
||||
import { notifyApp } from '../core/actions';
|
||||
import { createErrorNotification } from '../core/copy/appNotification';
|
||||
import { notifyApp } from '../core/reducers/appNotification';
|
||||
|
||||
/**
|
||||
* Handle an error from a k8s API call
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
import { hideAppNotification, notifyApp } from '../reducers/appNotification';
|
||||
import { updateNavIndex, updateConfigurationSubtitle } from '../reducers/navModel';
|
||||
|
||||
export { updateNavIndex, updateConfigurationSubtitle, notifyApp, hideAppNotification };
|
||||
@@ -4,10 +4,9 @@ import { useLocation } from 'react-router-dom';
|
||||
|
||||
import { AlertErrorPayload, AlertPayload, AppEvents, GrafanaTheme2 } from '@grafana/data';
|
||||
import { useStyles2, Stack } from '@grafana/ui';
|
||||
import { notifyApp, hideAppNotification } from 'app/core/actions';
|
||||
import { appEvents } from 'app/core/app_events';
|
||||
import { useGrafana } from 'app/core/context/GrafanaContext';
|
||||
import { selectVisible } from 'app/core/reducers/appNotification';
|
||||
import { hideAppNotification, notifyApp, selectVisible } from 'app/core/reducers/appNotification';
|
||||
import { useSelector, useDispatch } from 'app/types/store';
|
||||
|
||||
import {
|
||||
|
||||
@@ -6,7 +6,7 @@ import { dispatch as storeDispatch } from 'app/store/store';
|
||||
import { AppNotificationSeverity, AppNotification } from 'app/types/appNotifications';
|
||||
import { useDispatch } from 'app/types/store';
|
||||
|
||||
import { notifyApp } from '../actions';
|
||||
import { notifyApp } from '../reducers/appNotification';
|
||||
|
||||
const defaultSuccessNotification = {
|
||||
title: '',
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
"unicons/bookmark",
|
||||
"unicons/book-open",
|
||||
"unicons/brackets-curly",
|
||||
"unicons/brain",
|
||||
"unicons/bug",
|
||||
"unicons/building",
|
||||
"unicons/calculator-alt",
|
||||
|
||||
@@ -10,7 +10,6 @@ import {
|
||||
} from '@grafana/data';
|
||||
import { t } from '@grafana/i18n';
|
||||
import { getDataSourceSrv } from '@grafana/runtime';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { createErrorNotification, createWarningNotification } from 'app/core/copy/appNotification';
|
||||
import { dispatch } from 'app/store/store';
|
||||
import { RichHistoryQuery } from 'app/types/explore';
|
||||
@@ -23,6 +22,7 @@ import {
|
||||
} from '../history/RichHistoryStorage';
|
||||
import { createRetentionPeriodBoundary } from '../history/richHistoryLocalStorageUtils';
|
||||
import { getLocalRichHistoryStorage, getRichHistoryStorage } from '../history/richHistoryStorageProvider';
|
||||
import { notifyApp } from '../reducers/appNotification';
|
||||
import { contextSrv } from '../services/context_srv';
|
||||
|
||||
import {
|
||||
|
||||
@@ -5,7 +5,6 @@ import { t } from '@grafana/i18n';
|
||||
import { getBackendSrv, config, locationService } from '@grafana/runtime';
|
||||
import { sceneGraph, SceneTimeRangeLike, VizPanel } from '@grafana/scenes';
|
||||
import { shortURLAPIv1beta1 } from 'app/api/clients/shorturl/v1beta1';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { createErrorNotification, createSuccessNotification } from 'app/core/copy/appNotification';
|
||||
import { DashboardScene } from 'app/features/dashboard-scene/scene/DashboardScene';
|
||||
import { getDashboardUrl } from 'app/features/dashboard-scene/utils/getDashboardUrl';
|
||||
@@ -14,6 +13,7 @@ import { dispatch } from 'app/store/store';
|
||||
import { ShortURL } from '../../../../apps/shorturl/plugin/src/generated/shorturl/v1beta1/shorturl_object_gen';
|
||||
import { extractErrorMessage } from '../../api/utils';
|
||||
import { ShareLinkConfiguration } from '../../features/dashboard-scene/sharing/ShareButton/utils';
|
||||
import { notifyApp } from '../reducers/appNotification';
|
||||
|
||||
import { copyStringToClipboard } from './explore';
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { UrlQueryMap, getTimeZone, getDefaultTimeRange, dateMath } from '@grafana/data';
|
||||
import { locationService } from '@grafana/runtime';
|
||||
import { getFolderByUidFacade } from 'app/api/clients/folder/v1beta1/hooks';
|
||||
import { updateNavIndex } from 'app/core/actions';
|
||||
import { updateNavIndex } from 'app/core/reducers/navModel';
|
||||
import { buildNavModel } from 'app/features/folders/state/navModel';
|
||||
import { store } from 'app/store/store';
|
||||
|
||||
|
||||
@@ -9,8 +9,8 @@ import {
|
||||
sceneGraph,
|
||||
SceneTimeRangeLike,
|
||||
} from '@grafana/scenes';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { createErrorNotification } from 'app/core/copy/appNotification';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import { contextSrv } from 'app/core/services/context_srv';
|
||||
import { getMessageFromError } from 'app/core/utils/errors';
|
||||
import { alertRuleApi } from 'app/features/alerting/unified/api/alertRuleApi';
|
||||
|
||||
@@ -12,9 +12,9 @@ import {
|
||||
LibraryPanelRef,
|
||||
LibraryPanelKind,
|
||||
} from '@grafana/schema/dist/esm/schema/dashboard/v2';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import config from 'app/core/config';
|
||||
import { createErrorNotification } from 'app/core/copy/appNotification';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import { buildPanelKind } from 'app/features/dashboard/api/ResponseTransformers';
|
||||
import { DashboardModel } from 'app/features/dashboard/state/DashboardModel';
|
||||
import { PanelModel, GridPos } from 'app/features/dashboard/state/PanelModel';
|
||||
|
||||
@@ -9,8 +9,8 @@ import { Trans, t } from '@grafana/i18n';
|
||||
import { config } from '@grafana/runtime';
|
||||
import { SceneComponentProps } from '@grafana/scenes';
|
||||
import { Button, ClipboardButton, CodeEditor, Label, Spinner, Stack, Switch, useStyles2 } from '@grafana/ui';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { createSuccessNotification } from 'app/core/copy/appNotification';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import { dispatch } from 'app/store/store';
|
||||
|
||||
import { ShareExportTab } from '../ShareExportTab';
|
||||
|
||||
@@ -6,8 +6,8 @@ import { Trans, t } from '@grafana/i18n';
|
||||
import { SceneComponentProps, sceneGraph, SceneObjectBase, SceneObjectRef, VizPanel } from '@grafana/scenes';
|
||||
import { Dashboard } from '@grafana/schema';
|
||||
import { Button, ClipboardButton, Field, Input, Modal, RadioButtonGroup, Stack } from '@grafana/ui';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { createSuccessNotification } from 'app/core/copy/appNotification';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import { getTrackingSource, shareDashboardType } from 'app/features/dashboard/components/ShareModal/utils';
|
||||
import { getDashboardSnapshotSrv, SnapshotSharingOptions } from 'app/features/dashboard/services/SnapshotSrv';
|
||||
import { dispatch } from 'app/store/store';
|
||||
|
||||
@@ -3,8 +3,8 @@ import { createApi } from '@reduxjs/toolkit/query/react';
|
||||
import { createBaseQuery } from '@grafana/api-clients/rtkq';
|
||||
import { t } from '@grafana/i18n';
|
||||
import { config, FetchError, isFetchError } from '@grafana/runtime';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { createErrorNotification, createSuccessNotification } from 'app/core/copy/appNotification';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import {
|
||||
PublicDashboard,
|
||||
PublicDashboardSettings,
|
||||
|
||||
@@ -14,7 +14,6 @@ import {
|
||||
ToolbarButtonRow,
|
||||
ConfirmModal,
|
||||
} from '@grafana/ui';
|
||||
import { updateNavIndex } from 'app/core/actions';
|
||||
import { appEvents } from 'app/core/app_events';
|
||||
import { AppChromeUpdate } from 'app/core/components/AppChrome/AppChromeUpdate';
|
||||
import { NavToolbarSeparator } from 'app/core/components/AppChrome/NavToolbar/NavToolbarSeparator';
|
||||
@@ -22,7 +21,7 @@ import config from 'app/core/config';
|
||||
import { useAppNotification } from 'app/core/copy/appNotification';
|
||||
import { useBusEvent } from 'app/core/hooks/useBusEvent';
|
||||
import { ID_PREFIX, setStarred } from 'app/core/reducers/navBarTree';
|
||||
import { removeNavIndex } from 'app/core/reducers/navModel';
|
||||
import { removeNavIndex, updateNavIndex } from 'app/core/reducers/navModel';
|
||||
import AddPanelButton from 'app/features/dashboard/components/AddPanelButton/AddPanelButton';
|
||||
import { SaveDashboardDrawer } from 'app/features/dashboard/components/SaveDashboard/SaveDashboardDrawer';
|
||||
import { getDashboardSrv } from 'app/features/dashboard/services/DashboardSrv';
|
||||
|
||||
@@ -24,6 +24,7 @@ import { appEvents } from 'app/core/app_events';
|
||||
import { AppChromeUpdate } from 'app/core/components/AppChrome/AppChromeUpdate';
|
||||
import { Page } from 'app/core/components/Page/Page';
|
||||
import { SplitPaneWrapper } from 'app/core/components/SplitPaneWrapper/SplitPaneWrapper';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import { SubMenuItems } from 'app/features/dashboard/components/SubMenu/SubMenuItems';
|
||||
import { SaveLibraryPanelModal } from 'app/features/library-panels/components/SaveLibraryPanelModal/SaveLibraryPanelModal';
|
||||
import { PanelModelWithLibraryPanel } from 'app/features/library-panels/types';
|
||||
@@ -32,7 +33,6 @@ import { updateTimeZoneForSession } from 'app/features/profile/state/reducers';
|
||||
import { PanelOptionsChangedEvent, ShowModalReactEvent } from 'app/types/events';
|
||||
import { StoreState } from 'app/types/store';
|
||||
|
||||
import { notifyApp } from '../../../../core/actions';
|
||||
import { UnlinkModal } from '../../../dashboard-scene/scene/UnlinkModal';
|
||||
import { isPanelModelLibraryPanel } from '../../../library-panels/guard';
|
||||
import { getVariablesByKey } from '../../../variables/state/selectors';
|
||||
|
||||
@@ -8,10 +8,10 @@ import { createTheme } from '@grafana/data';
|
||||
import { selectors } from '@grafana/e2e-selectors';
|
||||
import { config, setDataSourceSrv } from '@grafana/runtime';
|
||||
import { Dashboard } from '@grafana/schema';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { AppChrome } from 'app/core/components/AppChrome/AppChrome';
|
||||
import { getRouteComponentProps } from 'app/core/navigation/mocks/routeProps';
|
||||
import { RouteDescriptor } from 'app/core/navigation/types';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import { HOME_NAV_ID } from 'app/core/reducers/navModel';
|
||||
import { DashboardInitPhase, DashboardMeta, DashboardRoutes } from 'app/types/dashboard';
|
||||
|
||||
|
||||
@@ -6,13 +6,13 @@ import { NavModel, NavModelItem, TimeRange, PageLayoutType, locationUtil, Grafan
|
||||
import { selectors } from '@grafana/e2e-selectors';
|
||||
import { locationService } from '@grafana/runtime';
|
||||
import { Themeable2, withTheme2 } from '@grafana/ui';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { ScrollRefElement } from 'app/core/components/NativeScrollbar';
|
||||
import { Page } from 'app/core/components/Page/Page';
|
||||
import { GrafanaContext, GrafanaContextType } from 'app/core/context/GrafanaContext';
|
||||
import { createErrorNotification } from 'app/core/copy/appNotification';
|
||||
import { getKioskMode } from 'app/core/navigation/kiosk';
|
||||
import { GrafanaRouteComponentProps } from 'app/core/navigation/types';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import { ID_PREFIX } from 'app/core/reducers/navBarTree';
|
||||
import { getNavModel } from 'app/core/selectors/navModel';
|
||||
import { PanelModel } from 'app/features/dashboard/state/PanelModel';
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import { PanelModel } from '@grafana/data';
|
||||
import { t } from '@grafana/i18n';
|
||||
import { locationService } from '@grafana/runtime';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { createErrorNotification } from 'app/core/copy/appNotification';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import { DataSourceInput } from 'app/features/manage-dashboards/state/reducers';
|
||||
import { DashboardJson } from 'app/features/manage-dashboards/types';
|
||||
import { dispatch } from 'app/types/store';
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import { TimeZone } from '@grafana/data';
|
||||
import { getBackendSrv } from '@grafana/runtime';
|
||||
import { WeekStart } from '@grafana/ui';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { createSuccessNotification } from 'app/core/copy/appNotification';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import { getDashboardAPI } from 'app/features/dashboard/api/dashboard_api';
|
||||
import { dashboardWatcher } from 'app/features/live/dashboard/dashboardWatcher';
|
||||
import { removeAllPanels } from 'app/features/panel/state/reducers';
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import { DataQuery, locationUtil, setWeekStart, DashboardLoadedEvent } from '@grafana/data';
|
||||
import { t } from '@grafana/i18n';
|
||||
import { config, isFetchError, locationService } from '@grafana/runtime';
|
||||
import { notifyApp } from 'app/core/actions';
|
||||
import { appEvents } from 'app/core/app_events';
|
||||
import { createErrorNotification } from 'app/core/copy/appNotification';
|
||||
import { notifyApp } from 'app/core/reducers/appNotification';
|
||||
import { backendSrv } from 'app/core/services/backend_srv';
|
||||
import { KeybindingSrv } from 'app/core/services/keybindingSrv';
|
||||
import store from 'app/core/store';
|
||||
|
||||
@@ -17,8 +17,8 @@ import {
|
||||
isFetchError,
|
||||
locationService,
|
||||
} from '@grafana/runtime';
|
||||
import { updateNavIndex } from 'app/core/actions';
|
||||
import { appEvents } from 'app/core/app_events';
|
||||
import { updateNavIndex } from 'app/core/reducers/navModel';
|
||||
import { getBackendSrv } from 'app/core/services/backend_srv';
|
||||
import { contextSrv } from 'app/core/services/context_srv';
|
||||
import { DatasourceAPIVersions } from 'app/features/apiserver/client';
|
||||
|
||||
@@ -73,7 +73,7 @@ import {
|
||||
contentOutlineTrackUnpinClicked,
|
||||
} from '../ContentOutline/ContentOutlineAnalyticEvents';
|
||||
import { useContentOutlineContext } from '../ContentOutline/ContentOutlineContext';
|
||||
import { getUrlStateFromPaneState } from '../hooks/useStateSync';
|
||||
import { getUrlStateFromPaneState } from '../hooks/useStateSync/external.utils';
|
||||
import { changePanelState } from '../state/explorePane';
|
||||
import { changeQueries, runQueries } from '../state/query';
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ import {
|
||||
import { t } from '@grafana/i18n';
|
||||
import { ClipboardButton, CustomCellRendererProps, IconButton, Modal, useTheme2 } from '@grafana/ui';
|
||||
import { getLogsPermalinkRange } from 'app/core/utils/shortLinks';
|
||||
import { getUrlStateFromPaneState } from 'app/features/explore/hooks/useStateSync';
|
||||
import { getUrlStateFromPaneState } from 'app/features/explore/hooks/useStateSync/external.utils';
|
||||
import { LogsFrame, DATAPLANE_ID_NAME } from 'app/features/logs/logsFrame';
|
||||
import { getState } from 'app/store/store';
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user