mirror of
https://github.com/grafana/grafana.git
synced 2026-01-10 22:14:04 +08:00
Compare commits
1 Commits
authnwithf
...
rbac-provi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bee7eb0fcb |
@@ -68,14 +68,14 @@ require (
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/at-wat/mqtt-go v0.19.6 // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
|
||||
github.com/aws/smithy-go v1.23.2 // indirect
|
||||
github.com/aws/smithy-go v1.23.1 // indirect
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
|
||||
@@ -173,8 +173,8 @@ github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
|
||||
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10 h1:7LllDZAegXU3yk41mwM6KcPu0wmjKGQB1bg99bNdQm4=
|
||||
@@ -185,10 +185,10 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 h1:gLD09eaJUdiszm7vd1btiQU
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8/go.mod h1:4RW3oMPt1POR74qVOC4SbubxAwdP4pCT0nSw3jycOU4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 h1:cTXRdLkpBanlDwISl+5chq5ui1d1YWg4PWMR9c3kXyw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84/go.mod h1:kwSy5X7tfIHN39uucmjQVs2LvDdXEjQucgQQEqCggEo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs=
|
||||
@@ -209,8 +209,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 h1:I7ghctfGXrscr7r1Ga/mDqSJ
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0/go.mod h1:Zo9id81XP6jbayIFWNuDpA6lMBWhsVy+3ou2jLa4JnA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B0f17JdflleJRNR4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
|
||||
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
|
||||
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0=
|
||||
|
||||
@@ -106,14 +106,14 @@ require (
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/at-wat/mqtt-go v0.19.6 // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
|
||||
@@ -124,7 +124,7 @@ require (
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
|
||||
github.com/aws/smithy-go v1.23.2 // indirect
|
||||
github.com/aws/smithy-go v1.23.1 // indirect
|
||||
github.com/bahlo/generic-list-go v0.2.0 // indirect
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
|
||||
@@ -238,8 +238,8 @@ github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
|
||||
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10 h1:7LllDZAegXU3yk41mwM6KcPu0wmjKGQB1bg99bNdQm4=
|
||||
@@ -250,10 +250,10 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 h1:gLD09eaJUdiszm7vd1btiQU
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8/go.mod h1:4RW3oMPt1POR74qVOC4SbubxAwdP4pCT0nSw3jycOU4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 h1:cTXRdLkpBanlDwISl+5chq5ui1d1YWg4PWMR9c3kXyw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84/go.mod h1:kwSy5X7tfIHN39uucmjQVs2LvDdXEjQucgQQEqCggEo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs=
|
||||
@@ -280,16 +280,14 @@ github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 h1:Pwbxovp
|
||||
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6/go.mod h1:Z4xLt5mXspLKjBV92i165wAJ/3T6TIv4n7RtIS8pWV0=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0 h1:0reDqfEN+tB+sozj2r92Bep8MEwBZgtAXTND1Kk9OXg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1 h1:w6a0H79HrHf3lr+zrw+pSzR5B+caiQFAKiNHlrUcnoc=
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1/go.mod h1:c6Vg0BRiU7v0MVhHupw90RyL120QBwAMLbDCzptGeMk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 h1:FTdEN9dtWPB0EOURNtDPmwGp6GGvMqRJCAihkSl/1No=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4/go.mod h1:mYubxV9Ff42fZH4kexj43gFPhgc/LyC7KqvUKt1watc=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 h1:I7ghctfGXrscr7r1Ga/mDqSJKm7Fkpl5Mwq79Z+rZqU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0/go.mod h1:Zo9id81XP6jbayIFWNuDpA6lMBWhsVy+3ou2jLa4JnA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B0f17JdflleJRNR4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
|
||||
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27 h1:60m4tnanN1ctzIu4V3bfCNJ39BiOPSm1gHFlFjTkRE0=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
|
||||
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
|
||||
@@ -2323,8 +2321,6 @@ modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
|
||||
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
|
||||
@@ -30,14 +30,14 @@ require (
|
||||
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
|
||||
github.com/apache/arrow-go/v18 v18.4.1 // indirect
|
||||
github.com/armon/go-metrics v0.4.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
|
||||
github.com/aws/smithy-go v1.23.2 // indirect
|
||||
github.com/aws/smithy-go v1.23.1 // indirect
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
|
||||
@@ -28,22 +28,22 @@ github.com/apache/thrift v0.22.0 h1:r7mTJdj51TMDe6RtcmNdQxgn9XcyfGDOzegMDRg47uc=
|
||||
github.com/apache/thrift v0.22.0/go.mod h1:1e7J/O1Ae6ZQMTYdy9xa3w9k+XHWPfRvdPyJeynQ+/g=
|
||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 h1:TxkI7QI+sFkTItN/6cJuMZEIVMFXeu2dI1ZffkXngKI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14/go.mod h1:12x4Uw/vijC11XkctTjy92TNCQ+UnNJkT7fzX0Yd93E=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 h1:M6JI2aGFEzYxsF6CXIuRBnkge9Wf9a2xU39rNeXgu10=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8/go.mod h1:Fw+MyTwlwjFsSTE31mH211Np+CUslml8mzc0AFEG09s=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B0f17JdflleJRNR4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
|
||||
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0=
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df/go.mod h1:hiVxq5OP2bUGBRNS3Z/bt/reCLFNbdcST6gISi1fiOM=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
|
||||
@@ -30,20 +30,9 @@ KeeperSpec: {
|
||||
}
|
||||
|
||||
#AWSConfig: {
|
||||
region: string
|
||||
accessKey?: #AWSAccessKey
|
||||
assumeRole?: #AWSAssumeRole
|
||||
kmsKeyID?: string
|
||||
}
|
||||
|
||||
#AWSAccessKey: {
|
||||
accessKeyID: #CredentialValue
|
||||
accessKeyID: #CredentialValue
|
||||
secretAccessKey: #CredentialValue
|
||||
}
|
||||
|
||||
#AWSAssumeRole: {
|
||||
assumeRoleArn: string
|
||||
externalID: string
|
||||
kmsKeyID?: string
|
||||
}
|
||||
|
||||
#AzureConfig: {
|
||||
|
||||
@@ -4,26 +4,14 @@ package v1beta1
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type KeeperAWSConfig struct {
|
||||
Region string `json:"region"`
|
||||
AccessKey *KeeperAWSAccessKey `json:"accessKey,omitempty"`
|
||||
AssumeRole *KeeperAWSAssumeRole `json:"assumeRole,omitempty"`
|
||||
KmsKeyID *string `json:"kmsKeyID,omitempty"`
|
||||
AccessKeyID KeeperCredentialValue `json:"accessKeyID"`
|
||||
SecretAccessKey KeeperCredentialValue `json:"secretAccessKey"`
|
||||
KmsKeyID *string `json:"kmsKeyID,omitempty"`
|
||||
}
|
||||
|
||||
// NewKeeperAWSConfig creates a new KeeperAWSConfig object.
|
||||
func NewKeeperAWSConfig() *KeeperAWSConfig {
|
||||
return &KeeperAWSConfig{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type KeeperAWSAccessKey struct {
|
||||
AccessKeyID KeeperCredentialValue `json:"accessKeyID"`
|
||||
SecretAccessKey KeeperCredentialValue `json:"secretAccessKey"`
|
||||
}
|
||||
|
||||
// NewKeeperAWSAccessKey creates a new KeeperAWSAccessKey object.
|
||||
func NewKeeperAWSAccessKey() *KeeperAWSAccessKey {
|
||||
return &KeeperAWSAccessKey{
|
||||
return &KeeperAWSConfig{
|
||||
AccessKeyID: *NewKeeperCredentialValue(),
|
||||
SecretAccessKey: *NewKeeperCredentialValue(),
|
||||
}
|
||||
@@ -48,17 +36,6 @@ func NewKeeperCredentialValue() *KeeperCredentialValue {
|
||||
return &KeeperCredentialValue{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type KeeperAWSAssumeRole struct {
|
||||
AssumeRoleArn string `json:"assumeRoleArn"`
|
||||
ExternalID string `json:"externalID"`
|
||||
}
|
||||
|
||||
// NewKeeperAWSAssumeRole creates a new KeeperAWSAssumeRole object.
|
||||
func NewKeeperAWSAssumeRole() *KeeperAWSAssumeRole {
|
||||
return &KeeperAWSAssumeRole{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type KeeperAzureConfig struct {
|
||||
KeyVaultName string `json:"keyVaultName"`
|
||||
|
||||
@@ -12,7 +12,6 @@ const (
|
||||
AzureKeeperType KeeperType = "azure"
|
||||
GCPKeeperType KeeperType = "gcp"
|
||||
HashiCorpKeeperType KeeperType = "hashicorp"
|
||||
SystemKeeperType KeeperType = "system"
|
||||
)
|
||||
|
||||
func (kt KeeperType) String() string {
|
||||
@@ -21,31 +20,9 @@ func (kt KeeperType) String() string {
|
||||
|
||||
// KeeperConfig is an interface that all keeper config types must implement.
|
||||
type KeeperConfig interface {
|
||||
// Returns the name of the keeper
|
||||
GetName() string
|
||||
Type() KeeperType
|
||||
}
|
||||
|
||||
type NamedKeeperConfig[T interface {
|
||||
Type() KeeperType
|
||||
}] struct {
|
||||
Name string
|
||||
Cfg T
|
||||
}
|
||||
|
||||
func NewNamedKeeperConfig[T interface {
|
||||
Type() KeeperType
|
||||
}](keeperName string, cfg T) *NamedKeeperConfig[T] {
|
||||
return &NamedKeeperConfig[T]{Name: keeperName, Cfg: cfg}
|
||||
}
|
||||
|
||||
func (c *NamedKeeperConfig[T]) GetName() string {
|
||||
return c.Name
|
||||
}
|
||||
func (c *NamedKeeperConfig[T]) Type() KeeperType {
|
||||
return c.Cfg.Type()
|
||||
}
|
||||
|
||||
func (s *KeeperSpec) GetType() KeeperType {
|
||||
if s.Aws != nil {
|
||||
return AWSKeeperType
|
||||
@@ -66,7 +43,7 @@ func (s *KeeperSpec) GetType() KeeperType {
|
||||
type SystemKeeperConfig struct{}
|
||||
|
||||
func (*SystemKeeperConfig) Type() KeeperType {
|
||||
return SystemKeeperType
|
||||
return "system"
|
||||
}
|
||||
|
||||
func (s *KeeperAWSConfig) Type() KeeperType {
|
||||
|
||||
@@ -14,8 +14,6 @@ import (
|
||||
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
|
||||
return map[string]common.OpenAPIDefinition{
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.Keeper": schema_pkg_apis_secret_v1beta1_Keeper(ref),
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAccessKey": schema_pkg_apis_secret_v1beta1_KeeperAWSAccessKey(ref),
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAssumeRole": schema_pkg_apis_secret_v1beta1_KeeperAWSAssumeRole(ref),
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSConfig": schema_pkg_apis_secret_v1beta1_KeeperAWSConfig(ref),
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAzureConfig": schema_pkg_apis_secret_v1beta1_KeeperAzureConfig(ref),
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperCredentialValue": schema_pkg_apis_secret_v1beta1_KeeperCredentialValue(ref),
|
||||
@@ -81,7 +79,7 @@ func schema_pkg_apis_secret_v1beta1_Keeper(ref common.ReferenceCallback) common.
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_secret_v1beta1_KeeperAWSAccessKey(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
func schema_pkg_apis_secret_v1beta1_KeeperAWSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
@@ -99,65 +97,6 @@ func schema_pkg_apis_secret_v1beta1_KeeperAWSAccessKey(ref common.ReferenceCallb
|
||||
Ref: ref("github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperCredentialValue"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"accessKeyID", "secretAccessKey"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperCredentialValue"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_secret_v1beta1_KeeperAWSAssumeRole(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"assumeRoleArn": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"externalID": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"assumeRoleArn", "externalID"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_secret_v1beta1_KeeperAWSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"region": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"accessKey": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Ref: ref("github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAccessKey"),
|
||||
},
|
||||
},
|
||||
"assumeRole": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Ref: ref("github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAssumeRole"),
|
||||
},
|
||||
},
|
||||
"kmsKeyID": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
@@ -165,11 +104,11 @@ func schema_pkg_apis_secret_v1beta1_KeeperAWSConfig(ref common.ReferenceCallback
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"region"},
|
||||
Required: []string{"accessKeyID", "secretAccessKey"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAccessKey", "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAssumeRole"},
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperCredentialValue"},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ description: Learn about RBAC Grafana provisioning and view an example YAML prov
|
||||
file that configures Grafana role assignments.
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
menuTitle: Provisioning RBAC with Grafana
|
||||
title: Provisioning RBAC with Grafana
|
||||
|
||||
@@ -105,11 +105,6 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/visualizations/
|
||||
cloudwatch-troubleshooting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/troubleshooting/
|
||||
---
|
||||
|
||||
# Amazon CloudWatch data source
|
||||
@@ -124,7 +119,6 @@ The following documents will help you get started working with the CloudWatch da
|
||||
- [CloudWatch query editor](ref:cloudwatch-query-editor)
|
||||
- [Templates and variables](ref:cloudwatch-template-variables)
|
||||
- [Configure AWS authentication](ref:cloudwatch-aws-authentication)
|
||||
- [Troubleshoot CloudWatch issues](ref:cloudwatch-troubleshooting)
|
||||
|
||||
## Import pre-configured dashboards
|
||||
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/aws-cloudwatch/configure/
|
||||
- ../../data-sources/aws-cloudwatch/
|
||||
- ../../data-sources/aws-cloudwatch/preconfig-cloudwatch-dashboards/
|
||||
- ../../data-sources/aws-cloudwatch/provision-cloudwatch/
|
||||
- ../cloudwatch/
|
||||
- ../preconfig-cloudwatch-dashboards/
|
||||
- ../provision-cloudwatch/
|
||||
- ../data-sources/aws-CloudWatch/
|
||||
- ../data-sources/aws-CloudWatch/preconfig-CloudWatch-dashboards/
|
||||
- ../data-sources/aws-CloudWatch/provision-CloudWatch/
|
||||
- CloudWatch/
|
||||
- preconfig-CloudWatch-dashboards/
|
||||
- provision-CloudWatch/
|
||||
description: This document provides configuration instructions for the CloudWatch data source.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -26,6 +25,11 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
@@ -36,6 +40,16 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
build-dashboards:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
@@ -139,7 +153,7 @@ You must use both an access key ID and a secret access key to authenticate.
|
||||
|
||||
Grafana automatically creates a link to a trace in X-Ray data source if logs contain the `@xrayTraceId` field. To use this feature, you must already have an X-Ray data source configured. For details, see the [X-Ray data source docs](/grafana/plugins/grafana-X-Ray-datasource/). To view the X-Ray link, select the log row in either the Explore view or dashboard [Logs panel](ref:logs) to view the log details section.
|
||||
|
||||
To log the `@xrayTraceId`, refer to the [AWS X-Ray documentation](https://docs.aws.amazon.com/xray/latest/devguide/xray-services.html). To provide the field to Grafana, your log queries must also contain the `@xrayTraceId` field, for example by using the query `fields @message, @xrayTraceId`.
|
||||
To log the `@xrayTraceId`, refer to the [AWS X-Ray documentation](https://docs.amazonaws.cn/en_us/xray/latest/devguide/xray-services.html). To provide the field to Grafana, your log queries must also contain the `@xrayTraceId` field, for example by using the query `fields @message, @xrayTraceId`.
|
||||
|
||||
**Private data source connect** - _Only for Grafana Cloud users._
|
||||
|
||||
|
||||
@@ -34,6 +34,11 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/#navigate-the-query-tab
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/#navigate-the-query-tab
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
@@ -178,7 +183,7 @@ If you use the expression field to reference another query, such as `queryA * 2`
|
||||
When you select `Builder` mode within the Metric search editor, a new Account field is displayed. Use the `Account` field to specify which of the linked monitoring accounts to target for the given query. By default, the `All` option is specified, which will target all linked accounts.
|
||||
|
||||
While in `Code` mode, you can specify any math expression. If the Monitoring account badge displays in the query editor header, all `SEARCH` expressions entered in this field will be cross-account by default and can query metrics from linked accounts. Note that while queries run cross-account, the autocomplete feature currently doesn't fetch cross-account resources, so you'll need to manually specify resource names when writing cross-account queries.
|
||||
You can limit the search to one or a set of accounts, as documented in the [AWS documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
|
||||
You can limit the search to one or a set of accounts, as documented in the [AWS documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
|
||||
|
||||
### Period macro
|
||||
|
||||
@@ -193,7 +198,7 @@ The link provided is valid for any account but displays the expected metrics onl
|
||||
|
||||
{{< figure src="/media/docs/cloudwatch/cloudwatch-deep-link-v12.1.png" caption="CloudWatch deep linking" >}}
|
||||
|
||||
This feature is not available for metrics based on [metric math expressions](#use-metric-math-expressions).
|
||||
This feature is not available for metrics based on [metric math expressions](#use-metric-math-expressions).
|
||||
|
||||
### Use Metric Insights syntax
|
||||
|
||||
@@ -314,9 +319,9 @@ The CloudWatch plugin monitors and troubleshoots applications that span multiple
|
||||
|
||||
To enable cross-account observability, complete the following steps:
|
||||
|
||||
1. Go to the [Amazon CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html) and follow the instructions for enabling cross-account observability.
|
||||
1. Go to the [Amazon CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html) and follow the instructions for enabling cross-account observability.
|
||||
|
||||
1. Add [two API actions](https://grafana.com/docs/grafana/latest/datasources/aws-cloudwatch/configure/#cross-account-observability-permissions) to the IAM policy attached to the role/user running the plugin.
|
||||
1. Add [two API actions](https://grafana.com/docs/grafana/latest/datasources/aws-cloudwatch/configure/#cross-account-observability-permissions) to the IAM policy attached to the role/user running the plugin.
|
||||
|
||||
Cross-account querying is available in the plugin through the **Logs**, **Metric search**, and **Metric Insights** modes.
|
||||
After you have configured it, you'll see a **Monitoring account** badge in the query editor header.
|
||||
|
||||
@@ -1,519 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/aws-cloudwatch/troubleshooting/
|
||||
description: Troubleshooting guide for the Amazon CloudWatch data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- cloudwatch
|
||||
- aws
|
||||
- troubleshooting
|
||||
- errors
|
||||
- authentication
|
||||
- query
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot Amazon CloudWatch data source issues
|
||||
weight: 500
|
||||
refs:
|
||||
configure-cloudwatch:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/
|
||||
cloudwatch-aws-authentication:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
cloudwatch-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/
|
||||
cloudwatch-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
---
|
||||
|
||||
# Troubleshoot Amazon CloudWatch data source issues
|
||||
|
||||
This document provides solutions to common issues you may encounter when configuring or using the Amazon CloudWatch data source. For configuration instructions, refer to [Configure CloudWatch](ref:configure-cloudwatch).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The data source health check validates both metrics and logs permissions. If your IAM policy only grants access to one of these (for example, metrics-only or logs-only), the health check displays a red status. However, the service you have permissions for is still usable—you can query metrics or logs based on whichever permissions are configured.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Authentication errors
|
||||
|
||||
These errors occur when AWS credentials are invalid, missing, or don't have the required permissions.
|
||||
|
||||
### "Access Denied" or "Not authorized to perform this operation"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Save & test fails with "Access Denied"
|
||||
- Queries return authorization errors
|
||||
- Namespaces, metrics, or dimensions don't load
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| --------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| IAM policy missing required permissions | Attach the appropriate IAM policy to your user or role. For metrics, you need `cloudwatch:ListMetrics`, `cloudwatch:GetMetricData`, and related permissions. For logs, you need `logs:DescribeLogGroups`, `logs:StartQuery`, `logs:GetQueryResults`, and related permissions. Refer to [Configure CloudWatch](ref:configure-cloudwatch) for complete policy examples. |
|
||||
| Incorrect access key or secret key | Verify the credentials in the AWS Console under **IAM** > **Users** > your user > **Security credentials**. Generate new credentials if necessary. |
|
||||
| Credentials have expired | For temporary credentials, generate new ones. For access keys, verify they haven't been deactivated or deleted. |
|
||||
| Wrong AWS region | Verify the default region in the data source configuration matches where your resources are located. |
|
||||
| Assume Role ARN is incorrect | Verify the role ARN format: `arn:aws:iam::<account-id>:role/<role-name>`. Check that the role exists in the AWS Console. |
|
||||
|
||||
### "Unable to assume role"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Authentication fails when using Assume Role ARN
|
||||
- Error message references STS or AssumeRole
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the trust relationship on the IAM role allows the Grafana credentials to assume it.
|
||||
1. Check the trust policy includes the correct principal (the user or role running Grafana).
|
||||
1. If using an external ID, ensure it matches exactly in both the role's trust policy and the Grafana data source configuration.
|
||||
1. Verify the base credentials have the `sts:AssumeRole` permission.
|
||||
1. Check that the role ARN is correct and the role exists.
|
||||
|
||||
**Example trust policy:**
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"AWS": "arn:aws:iam::<your-account-id>:user/<grafana-user>"
|
||||
},
|
||||
"Action": "sts:AssumeRole",
|
||||
"Condition": {
|
||||
"StringEquals": {
|
||||
"sts:ExternalId": "<your-external-id>"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### AWS SDK Default authentication not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test fails when using AWS SDK Default
|
||||
- Works locally but fails in production
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify AWS credentials are configured in the environment where Grafana runs.
|
||||
1. Check for credentials in the default locations:
|
||||
- Environment variables (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`)
|
||||
- Shared credentials file (`~/.aws/credentials`)
|
||||
- EC2 instance metadata (if running on EC2)
|
||||
- ECS task role (if running in ECS)
|
||||
- EKS service account (if running in EKS)
|
||||
1. Ensure the Grafana process has permission to read the credentials file.
|
||||
1. For EKS with IRSA, set the pod's security context to allow user 472 (grafana) to access the projected token. Refer to [AWS authentication](ref:cloudwatch-aws-authentication) for details.
|
||||
|
||||
### Credentials file not found
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Error indicates credentials file cannot be read
|
||||
- Authentication fails with "Credentials file" option
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Create the credentials file at `~/.aws/credentials` for the user running the `grafana-server` service.
|
||||
1. Verify the file has correct permissions (`0644`).
|
||||
1. If the file exists but isn't working, move it to `/usr/share/grafana/` and set permissions to `0644`.
|
||||
1. Ensure the profile name in the data source configuration matches a profile in the credentials file.
|
||||
|
||||
## Connection errors
|
||||
|
||||
These errors occur when Grafana cannot reach AWS CloudWatch endpoints.
|
||||
|
||||
### "Request timed out" or connection failures
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test times out
|
||||
- Queries fail with timeout errors
|
||||
- Intermittent connection issues
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify network connectivity from the Grafana server to AWS endpoints.
|
||||
1. Check firewall rules allow outbound HTTPS (port 443) to AWS services.
|
||||
1. If using a VPC, ensure proper NAT gateway or VPC endpoint configuration.
|
||||
1. For Grafana Cloud connecting to private resources, configure [Private data source connect](ref:private-data-source-connect).
|
||||
1. Check if the default region is correct—incorrect regions may cause longer timeouts.
|
||||
1. Increase the timeout settings if queries involve large data volumes.
|
||||
|
||||
### Custom endpoint configuration issues
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Connection fails when using a custom endpoint
|
||||
- Endpoint URL rejected
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the endpoint URL format is correct.
|
||||
1. Ensure the endpoint is accessible from the Grafana server.
|
||||
1. Check that the endpoint supports the required AWS APIs.
|
||||
1. For VPC endpoints, verify the endpoint policy allows the required actions.
|
||||
|
||||
## CloudWatch Metrics query errors
|
||||
|
||||
These errors occur when querying CloudWatch Metrics.
|
||||
|
||||
### "No data" or empty results
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query executes without error but returns no data
|
||||
- Charts show "No data" message
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| ------------------------------- | ---------------------------------------------------------------------------------------------------------- |
|
||||
| Time range doesn't contain data | Expand the dashboard time range. CloudWatch metrics have different retention periods based on resolution. |
|
||||
| Wrong namespace or metric name | Verify the namespace (for example, `AWS/EC2`) and metric name (for example, `CPUUtilization`) are correct. |
|
||||
| Incorrect dimensions | Ensure dimension names and values match your AWS resources exactly. |
|
||||
| Match Exact enabled incorrectly | When Match Exact is enabled, all dimensions must be specified. Try disabling it to see if metrics appear. |
|
||||
| Period too large | Reduce the period setting or set it to "auto" to ensure data points are returned for your time range. |
|
||||
| Custom metrics not configured | Add custom metric namespaces in the data source configuration under **Namespaces of Custom Metrics**. |
|
||||
|
||||
### "Metric not found" or metrics don't appear in drop-down
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Expected metrics don't appear in the query editor
|
||||
- Metric drop-down is empty for a namespace
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the metric exists in the selected region.
|
||||
1. For custom metrics, add the namespace to **Namespaces of Custom Metrics** in the data source configuration.
|
||||
1. Check that the IAM policy includes `cloudwatch:ListMetrics` permission.
|
||||
1. CloudWatch limits `ListMetrics` to 500 results per page. To retrieve more metrics, increase the `list_metrics_page_limit` setting in the [Grafana configuration file](https://grafana.com/docs/grafana/latest/datasources/aws-cloudwatch/configure/#configure-the-data-source-with-grafanaini).
|
||||
1. Use the Query Inspector to verify the API request and response.
|
||||
|
||||
### Dimension values not loading
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Dimension value drop-down doesn't populate
|
||||
- Wildcard searches return no results
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the IAM policy includes `cloudwatch:ListMetrics` permission.
|
||||
1. Check that the namespace and metric are selected before dimension values can load.
|
||||
1. For EC2 dimensions, ensure `ec2:DescribeTags` and `ec2:DescribeInstances` permissions are granted.
|
||||
1. Dimension values require existing metrics—if no metrics match, no values appear.
|
||||
|
||||
### "Too many data points" or API throttling
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Queries fail with throttling errors
|
||||
- Performance degrades with multiple panels
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Increase the period setting to reduce the number of data points.
|
||||
1. Reduce the time range of your queries.
|
||||
1. Use fewer dimensions or wildcard queries per panel.
|
||||
1. Request a quota increase for `GetMetricData` requests per second in the [AWS Service Quotas console](https://console.aws.amazon.com/servicequotas/).
|
||||
1. Enable query caching in Grafana to reduce API calls.
|
||||
|
||||
### Metric math expression errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Expression returns errors
|
||||
- Referenced metrics not found
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify each referenced metric has a unique ID set.
|
||||
1. Check that metric IDs start with a lowercase letter and contain only letters, numbers, and underscores.
|
||||
1. Ensure all referenced metrics are in the same query.
|
||||
1. Verify the expression syntax follows [AWS Metric Math](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html) documentation.
|
||||
1. Metric math expressions can't be used with Grafana alerting if they reference other query rows.
|
||||
|
||||
## CloudWatch Logs query errors
|
||||
|
||||
These errors occur when querying CloudWatch Logs.
|
||||
|
||||
### "Query failed" or logs don't appear
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Log queries return errors
|
||||
- No log data is displayed
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify log group names are correct and exist in the selected region.
|
||||
1. Check the IAM policy includes `logs:StartQuery`, `logs:GetQueryResults`, and `logs:DescribeLogGroups` permissions.
|
||||
1. Ensure the time range contains log data.
|
||||
1. Verify the query syntax is valid. For CloudWatch Logs Insights QL, test the query in the AWS Console.
|
||||
1. Select the correct query language (Logs Insights QL, OpenSearch PPL, or OpenSearch SQL) based on your query syntax.
|
||||
|
||||
### Log query timeout
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query runs for a long time then fails
|
||||
- Error mentions timeout
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Increase the **Query timeout result** setting in the data source configuration (default is 30 minutes).
|
||||
1. Narrow the time range to reduce the amount of data scanned.
|
||||
1. Add filters to your query to limit results.
|
||||
1. Break complex queries into smaller, more focused queries.
|
||||
1. For alerting, the timeout defined in the [Grafana configuration file](https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#unified_alerting) takes precedence.
|
||||
|
||||
### Log groups not appearing in selector
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Log group selector is empty
|
||||
- Can't find expected log groups
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the IAM policy includes `logs:DescribeLogGroups` permission.
|
||||
1. Check that log groups exist in the selected region.
|
||||
1. For cross-account observability, ensure proper IAM permissions for `oam:ListSinks` and `oam:ListAttachedLinks`.
|
||||
1. Use prefix search to filter log groups if you have many groups.
|
||||
1. Verify the selected account (for cross-account) contains the expected log groups.
|
||||
|
||||
### OpenSearch SQL query errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- OpenSearch SQL queries fail
|
||||
- Syntax errors with SQL queries
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Specify the log group identifier or ARN in the `FROM` clause:
|
||||
|
||||
```sql
|
||||
SELECT * FROM `log_group_name` WHERE `@message` LIKE '%error%'
|
||||
```
|
||||
|
||||
1. For multiple log groups, use the `logGroups` function:
|
||||
|
||||
```sql
|
||||
SELECT * FROM `logGroups(logGroupIdentifier: ['LogGroup1', 'LogGroup2'])`
|
||||
```
|
||||
|
||||
1. Amazon CloudWatch supports only a subset of OpenSearch SQL commands. Refer to the [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData_Languages.html) for supported syntax.
|
||||
|
||||
## Template variable errors
|
||||
|
||||
These errors occur when using template variables with the CloudWatch data source.
|
||||
|
||||
### Variables return no values
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Variable drop-down is empty
|
||||
- Dashboard fails to load with variable errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the data source connection is working.
|
||||
1. Check that the IAM policy includes permissions for the variable query type:
|
||||
- **Regions:** No additional permissions needed.
|
||||
- **Namespaces:** No additional permissions needed.
|
||||
- **Metrics:** Requires `cloudwatch:ListMetrics`.
|
||||
- **Dimension Values:** Requires `cloudwatch:ListMetrics`.
|
||||
- **EC2 Instance Attributes:** Requires `ec2:DescribeInstances`.
|
||||
- **EBS Volume IDs:** Requires `ec2:DescribeVolumes`.
|
||||
- **Resource ARNs:** Requires `tag:GetResources`.
|
||||
- **Log Groups:** Requires `logs:DescribeLogGroups`.
|
||||
1. For dependent variables, ensure parent variables have valid selections.
|
||||
1. Verify the region is set correctly (use "default" for the data source's default region).
|
||||
|
||||
For more information on template variables, refer to [CloudWatch template variables](ref:cloudwatch-template-variables).
|
||||
|
||||
### Multi-value template variables cause query failures
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Queries fail when selecting multiple dimension values
|
||||
- Error about search expression limits
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Search expressions are limited to 1,024 characters. Reduce the number of selected values.
|
||||
1. Use the asterisk (`*`) wildcard instead of selecting "All" to query all metrics for a dimension.
|
||||
1. Multi-valued template variables are only supported for dimension values—not for Region, Namespace, or Metric Name.
|
||||
|
||||
## Cross-account observability errors
|
||||
|
||||
These errors occur when using CloudWatch cross-account observability features.
|
||||
|
||||
### Cross-account queries fail
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Can't query metrics or logs from linked accounts
|
||||
- Monitoring account badge doesn't appear
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify cross-account observability is configured in the AWS CloudWatch console.
|
||||
1. Add the required IAM permissions:
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": ["oam:ListSinks", "oam:ListAttachedLinks"],
|
||||
"Effect": "Allow",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
1. Check that the monitoring account and source accounts are properly linked in AWS.
|
||||
1. Cross-account observability works within a single region—verify all accounts are in the same region.
|
||||
1. EC2 Instance Attributes can't be queried across accounts because they use the EC2 API, not the CloudWatch API.
|
||||
|
||||
## Quota and pricing issues
|
||||
|
||||
These issues relate to AWS service quotas and cost management.
|
||||
|
||||
### API throttling errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- "Rate exceeded" errors
|
||||
- Dashboard panels intermittently fail to load
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Reduce the frequency of dashboard refreshes.
|
||||
1. Increase the period setting to reduce `GetMetricData` requests.
|
||||
1. Enable query caching in Grafana (available in Grafana Enterprise and Grafana Cloud).
|
||||
1. Request a quota increase in the [AWS Service Quotas console](https://console.aws.amazon.com/servicequotas/).
|
||||
1. Consider consolidating similar queries using metric math.
|
||||
|
||||
### Unexpectedly high CloudWatch costs
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- AWS CloudWatch costs are higher than expected
|
||||
- Frequent API calls from Grafana
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. The `GetMetricData` API doesn't qualify for the CloudWatch API free tier.
|
||||
1. Reduce dashboard auto-refresh frequency.
|
||||
1. Increase the period setting to reduce data points returned.
|
||||
1. Use query caching to reduce repeated API calls.
|
||||
1. Review variable query settings—set variable refresh to "On dashboard load" instead of "On time range change."
|
||||
1. Avoid using wildcards in dimensions when possible, as they generate search expressions with multiple API calls.
|
||||
|
||||
## Other common issues
|
||||
|
||||
These issues don't produce specific error messages but are commonly encountered.
|
||||
|
||||
### Custom metrics don't appear
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Custom metrics from applications or agents don't show in the namespace drop-down
|
||||
- Only standard AWS namespaces are visible
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Add your custom metric namespace to the **Namespaces of Custom Metrics** field in the data source configuration.
|
||||
1. Separate multiple namespaces with commas (for example, `CWAgent,CustomNamespace`).
|
||||
1. Verify custom metrics have been published to CloudWatch in the selected region.
|
||||
|
||||
### Pre-configured dashboards not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Imported dashboards show no data
|
||||
- Dashboard variables don't load
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the data source name in the dashboard matches your CloudWatch data source.
|
||||
1. Check that the dashboard's AWS region setting matches where your resources are located.
|
||||
1. Ensure the IAM policy grants access to the required services (EC2, Lambda, RDS, etc.).
|
||||
1. Verify resources exist and are emitting metrics in the selected region.
|
||||
|
||||
### X-Ray trace links not appearing
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Log entries don't show X-Ray trace links
|
||||
- `@xrayTraceId` field not appearing
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify an X-Ray data source is configured and linked in the CloudWatch data source settings.
|
||||
1. Ensure your logs contain the `@xrayTraceId` field.
|
||||
1. Update log queries to include `@xrayTraceId` in the fields, for example: `fields @message, @xrayTraceId`.
|
||||
1. Configure your application to log X-Ray trace IDs. Refer to the [AWS X-Ray documentation](https://docs.aws.amazon.com/xray/latest/devguide/xray-services.html).
|
||||
|
||||
## Enable debug logging
|
||||
|
||||
To capture detailed error information for troubleshooting:
|
||||
|
||||
1. Set the Grafana log level to `debug` in the configuration file:
|
||||
|
||||
```ini
|
||||
[log]
|
||||
level = debug
|
||||
```
|
||||
|
||||
1. Review logs in `/var/log/grafana/grafana.log` (or your configured log location).
|
||||
1. Look for CloudWatch-specific entries that include request and response details.
|
||||
1. Reset the log level to `info` after troubleshooting to avoid excessive log volume.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you've tried the solutions above and still encounter issues:
|
||||
|
||||
1. Check the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Review the [CloudWatch plugin GitHub issues](https://github.com/grafana/grafana/issues) for known bugs.
|
||||
1. Consult the [AWS CloudWatch documentation](https://docs.aws.amazon.com/cloudwatch/) for service-specific guidance.
|
||||
1. Contact Grafana Support if you're an Enterprise, Cloud Pro, or Cloud Contracted user.
|
||||
1. When reporting issues, include:
|
||||
- Grafana version
|
||||
- AWS region
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Query configuration (redact credentials and account IDs)
|
||||
@@ -124,8 +124,6 @@ For more information about dashboard permissions, refer to [Dashboard permission
|
||||
## Restore deleted dashboards
|
||||
|
||||
{{% admonition type="caution" %}}
|
||||
Restoring deleted dashboards is currently in private preview. Grafana Labs offers support on a best-effort basis, and breaking changes might occur prior to the feature being made generally available.
|
||||
|
||||
The feature is only available in Grafana Cloud.
|
||||
{{% /admonition %}}
|
||||
|
||||
|
||||
11
go.mod
11
go.mod
@@ -32,14 +32,13 @@ require (
|
||||
github.com/apache/arrow-go/v18 v18.4.1 // @grafana/plugins-platform-backend
|
||||
github.com/armon/go-radix v1.0.0 // @grafana/grafana-app-platform-squad
|
||||
github.com/aws/aws-sdk-go v1.55.7 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/ec2 v1.225.2 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/oam v1.18.3 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1 // @grafana/grafana-operator-experience-squad
|
||||
github.com/aws/smithy-go v1.23.2 // @grafana/aws-datasources
|
||||
github.com/aws/smithy-go v1.23.1 // @grafana/aws-datasources
|
||||
github.com/beevik/etree v1.4.1 // @grafana/grafana-backend-group
|
||||
github.com/benbjohnson/clock v1.3.5 // @grafana/alerting-backend
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect; @grafana/grafana-developer-enablement-squad
|
||||
@@ -147,7 +146,6 @@ require (
|
||||
github.com/olekukonko/tablewriter v0.0.5 // @grafana/grafana-backend-group
|
||||
github.com/open-feature/go-sdk v1.16.0 // @grafana/grafana-backend-group
|
||||
github.com/open-feature/go-sdk-contrib/providers/go-feature-flag v0.2.6 // @grafana/grafana-backend-group
|
||||
github.com/open-feature/go-sdk-contrib/providers/ofrep v0.1.6 // @grafana/grafana-backend-group
|
||||
github.com/openfga/api/proto v0.0.0-20250909172242-b4b2a12f5c67 // @grafana/identity-access-team
|
||||
github.com/openfga/language/pkg/go v0.2.0-beta.2.0.20251027165255-0f8f255e5f6c // @grafana/identity-access-team
|
||||
github.com/openfga/openfga v1.11.1 // @grafana/identity-access-team
|
||||
@@ -346,8 +344,8 @@ require (
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
|
||||
@@ -545,6 +543,7 @@ require (
|
||||
github.com/oklog/run v1.1.0 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/oklog/ulid/v2 v2.1.1 // indirect
|
||||
github.com/open-feature/go-sdk-contrib/providers/ofrep v0.1.6 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.124.1 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/core/xidutils v0.124.1 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.124.1 // indirect
|
||||
|
||||
18
go.sum
18
go.sum
@@ -850,8 +850,8 @@ github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2z
|
||||
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
|
||||
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10 h1:7LllDZAegXU3yk41mwM6KcPu0wmjKGQB1bg99bNdQm4=
|
||||
@@ -862,10 +862,10 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 h1:gLD09eaJUdiszm7vd1btiQU
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8/go.mod h1:4RW3oMPt1POR74qVOC4SbubxAwdP4pCT0nSw3jycOU4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 h1:cTXRdLkpBanlDwISl+5chq5ui1d1YWg4PWMR9c3kXyw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84/go.mod h1:kwSy5X7tfIHN39uucmjQVs2LvDdXEjQucgQQEqCggEo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs=
|
||||
@@ -892,16 +892,14 @@ github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 h1:Pwbxovp
|
||||
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6/go.mod h1:Z4xLt5mXspLKjBV92i165wAJ/3T6TIv4n7RtIS8pWV0=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0 h1:0reDqfEN+tB+sozj2r92Bep8MEwBZgtAXTND1Kk9OXg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1 h1:w6a0H79HrHf3lr+zrw+pSzR5B+caiQFAKiNHlrUcnoc=
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1/go.mod h1:c6Vg0BRiU7v0MVhHupw90RyL120QBwAMLbDCzptGeMk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 h1:FTdEN9dtWPB0EOURNtDPmwGp6GGvMqRJCAihkSl/1No=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4/go.mod h1:mYubxV9Ff42fZH4kexj43gFPhgc/LyC7KqvUKt1watc=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 h1:I7ghctfGXrscr7r1Ga/mDqSJKm7Fkpl5Mwq79Z+rZqU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0/go.mod h1:Zo9id81XP6jbayIFWNuDpA6lMBWhsVy+3ou2jLa4JnA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B0f17JdflleJRNR4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
|
||||
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27 h1:60m4tnanN1ctzIu4V3bfCNJ39BiOPSm1gHFlFjTkRE0=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
|
||||
|
||||
@@ -423,7 +423,6 @@ github.com/aws/aws-msk-iam-sasl-signer-go v1.0.1 h1:nMp7diZObd4XEVUR0pEvn7/E13JI
|
||||
github.com/aws/aws-msk-iam-sasl-signer-go v1.0.1/go.mod h1:MVYeeOhILFFemC/XlYTClvBjYZrg/EPd3ts885KrNTI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.5/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.38.1/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.2/go.mod h1:17ft42Yb2lF6OigqSYiDAiUcX4RIkEMY6XxEMJsrAes=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc=
|
||||
@@ -437,10 +436,8 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.4/go.mod h1:9xzb8/SV62W6gHQG
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69/go.mod h1:GJj8mmO6YT6EqgduWocwhMoxTLFitkhIrK+owzrYL2I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36/go.mod h1:Q1lnJArKRXkenyog6+Y+zr7WDpk4e6XlR6gs20bbeNo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4/go.mod h1:l4bdfCD7XyyZA9BolKBo1eLqgaJxl0/x91PL4Yqe0ao=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36/go.mod h1:UdyGa7Q91id/sdyHPwth+043HhmP6yP9MBHgbZM0xo8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4/go.mod h1:yDmJgqOiH4EA8Hndnv4KwAo8jCGTSnM5ASG1nBI+toA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
|
||||
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.44.0 h1:A99gjqZDbdhjtjJVZrmVzVKO2+p3MSg35bDWtbMQVxw=
|
||||
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.44.0/go.mod h1:mWB0GE1bqcVSvpW7OtFA0sKuHk52+IqtnsYU2jUfYAs=
|
||||
@@ -494,7 +491,6 @@ github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp
|
||||
github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
|
||||
github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
|
||||
github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/awslabs/aws-lambda-go-api-proxy v0.16.2 h1:CJyGEyO1CIwOnXTU40urf0mchf6t3voxpvUDikOU9LY=
|
||||
github.com/awslabs/aws-lambda-go-api-proxy v0.16.2/go.mod h1:vxxjwBHe/KbgFeNlAP/Tvp4SsVRL3WQamcWRxqVh0z0=
|
||||
github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
|
||||
|
||||
@@ -58,12 +58,14 @@
|
||||
"bundle": "rollup -c rollup.config.ts --configPlugin esbuild",
|
||||
"clean": "rimraf ./dist ./compiled ./unstable ./testing ./package.tgz",
|
||||
"typecheck": "tsc --emitDeclarationOnly false --noEmit",
|
||||
"codegen": "rtk-query-codegen-openapi ./scripts/codegen.ts",
|
||||
"prepack": "cp package.json package.json.bak && node ../../scripts/prepare-npm-package.js",
|
||||
"postpack": "mv package.json.bak package.json",
|
||||
"i18n-extract": "i18next-cli extract --sync-primary"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@grafana/test-utils": "workspace:*",
|
||||
"@rtk-query/codegen-openapi": "^2.0.0",
|
||||
"@testing-library/jest-dom": "^6.6.3",
|
||||
"@testing-library/react": "^16.3.0",
|
||||
"@testing-library/user-event": "^14.6.1",
|
||||
@@ -94,7 +96,6 @@
|
||||
"dependencies": {
|
||||
"@emotion/css": "11.13.5",
|
||||
"@faker-js/faker": "^9.8.0",
|
||||
"@grafana/api-clients": "12.4.0-pre",
|
||||
"@grafana/i18n": "12.4.0-pre",
|
||||
"@reduxjs/toolkit": "^2.9.0",
|
||||
"fishery": "^2.3.1",
|
||||
|
||||
23
packages/grafana-alerting/scripts/README.md
Normal file
23
packages/grafana-alerting/scripts/README.md
Normal file
@@ -0,0 +1,23 @@
|
||||
# Re-generate the clients
|
||||
|
||||
⚠️ This guide assumes the Backend definitions have been updated in `apps/alerting`.
|
||||
|
||||
## Re-create OpenAPI specification
|
||||
|
||||
Start with re-generating the OpenAPI snapshots by running the test in `pkg/tests/apis/openapi_test.go`.
|
||||
|
||||
This will output the OpenAPI JSON spec file(s) in `pkg/tests/apis/openapi_snapshots`.
|
||||
|
||||
## Process OpenAPI specifications
|
||||
|
||||
Next up run the post-processing of the snapshots with `yarn run process-specs`, this will copy processed specifications to `./data/openapi/`.
|
||||
|
||||
## Generate RTKQ files
|
||||
|
||||
These files are built using the `yarn run codegen` command, make sure to run that in the Grafana Alerting package working directory.
|
||||
|
||||
`yarn --cwd ./packages/grafana-alerting run codegen`.
|
||||
|
||||
API clients will be written to `src/grafana/api/<version>/api.gen.ts`.
|
||||
|
||||
Make sure to create a versioned API client for each API version – see `src/grafana/api/v0alpha1/api.ts` as an example.
|
||||
55
packages/grafana-alerting/scripts/codegen.ts
Normal file
55
packages/grafana-alerting/scripts/codegen.ts
Normal file
@@ -0,0 +1,55 @@
|
||||
/**
|
||||
* This script will generate TypeScript type definitions and a RTKQ clients for the alerting k8s APIs.
|
||||
*
|
||||
* Run `yarn run codegen` from the "grafana-alerting" package to invoke this script.
|
||||
*
|
||||
* API clients will be placed in "src/grafana/api/<version>/api.gen.ts"
|
||||
*/
|
||||
import type { ConfigFile } from '@rtk-query/codegen-openapi';
|
||||
|
||||
// ℹ️ append API groups and versions here to generate additional API clients
|
||||
const SPECS = [
|
||||
['notifications.alerting.grafana.app', ['v0alpha1']],
|
||||
['rules.alerting.grafana.app', ['v0alpha1']],
|
||||
// keep this in Grafana Enterprise
|
||||
// ['alertenrichment.grafana.app', ['v1beta1']],
|
||||
] as const;
|
||||
|
||||
type OutputFile = Omit<ConfigFile, 'outputFile'>;
|
||||
type OutputFiles = Record<string, OutputFile>;
|
||||
|
||||
const outputFiles = SPECS.reduce<OutputFiles>((groupAcc, [group, versions]) => {
|
||||
return versions.reduce<OutputFiles>((versionAcc, version) => {
|
||||
// Create a unique export name based on the group
|
||||
const groupName = group.split('.')[0]; // e.g., 'notifications', 'rules', 'alertenrichment'
|
||||
const exportName = `${groupName}API`;
|
||||
|
||||
// ℹ️ these snapshots are generated by running "go test pkg/tests/apis/openapi_test.go" and "scripts/process-specs.ts",
|
||||
// see the README in the "openapi_snapshots" directory
|
||||
const schemaFile = `../../../data/openapi/${group}-${version}.json`;
|
||||
|
||||
// ℹ️ make sure there is a API file in each versioned directory
|
||||
const apiFile = `../src/grafana/api/${groupName}/${version}/api.ts`;
|
||||
|
||||
// output each api client into a versioned directory with group-specific naming
|
||||
const outputPath = `../src/grafana/api/${groupName}/${version}/${groupName}.api.gen.ts`;
|
||||
|
||||
versionAcc[outputPath] = {
|
||||
exportName,
|
||||
schemaFile,
|
||||
apiFile,
|
||||
tag: true, // generate tags for cache invalidation
|
||||
} satisfies OutputFile;
|
||||
|
||||
return versionAcc;
|
||||
}, groupAcc);
|
||||
}, {});
|
||||
|
||||
export default {
|
||||
// these are intentionally empty but will be set for each versioned config file
|
||||
exportName: '',
|
||||
schemaFile: '',
|
||||
apiFile: '',
|
||||
|
||||
outputFiles,
|
||||
} satisfies ConfigFile;
|
||||
@@ -0,0 +1,18 @@
|
||||
import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query/react';
|
||||
|
||||
import { getAPIBaseURL, getAPIReducerPath } from '../../util';
|
||||
|
||||
import { GROUP, VERSION } from './const';
|
||||
|
||||
const baseUrl = getAPIBaseURL(GROUP, VERSION);
|
||||
const reducerPath = getAPIReducerPath(GROUP, VERSION);
|
||||
|
||||
export const api = createApi({
|
||||
reducerPath,
|
||||
baseQuery: fetchBaseQuery({
|
||||
// Set URL correctly so MSW can intercept requests
|
||||
// https://mswjs.io/docs/runbook#rtk-query-requests-are-not-intercepted
|
||||
baseUrl: new URL(baseUrl, globalThis.location.origin).href,
|
||||
}),
|
||||
endpoints: () => ({}),
|
||||
});
|
||||
@@ -0,0 +1,2 @@
|
||||
export const VERSION = 'v0alpha1' as const;
|
||||
export const GROUP = 'notifications.alerting.grafana.app' as const;
|
||||
@@ -1,9 +1,8 @@
|
||||
import { faker } from '@faker-js/faker';
|
||||
import { Factory } from 'fishery';
|
||||
|
||||
import { API_GROUP, API_VERSION } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { DEFAULT_NAMESPACE, generateResourceVersion, generateTitle, generateUID } from '../../../../../mocks/util';
|
||||
import { GROUP, VERSION } from '../../const';
|
||||
import {
|
||||
ContactPoint,
|
||||
ContactPointMetadataAnnotations,
|
||||
@@ -15,7 +14,7 @@ import { AlertingEntityMetadataAnnotationsFactory } from './common';
|
||||
|
||||
export const ListReceiverApiResponseFactory = Factory.define<EnhancedListReceiverApiResponse>(() => ({
|
||||
kind: 'ReceiverList',
|
||||
apiVersion: `${API_GROUP}/${API_VERSION}`,
|
||||
apiVersion: `${GROUP}/${VERSION}`,
|
||||
metadata: {
|
||||
resourceVersion: generateResourceVersion(),
|
||||
},
|
||||
@@ -27,7 +26,7 @@ export const ContactPointFactory = Factory.define<ContactPoint>(() => {
|
||||
|
||||
return {
|
||||
kind: 'Receiver',
|
||||
apiVersion: `${API_GROUP}/${API_VERSION}`,
|
||||
apiVersion: `${GROUP}/${VERSION}`,
|
||||
metadata: {
|
||||
name: btoa(title),
|
||||
namespace: DEFAULT_NAMESPACE,
|
||||
|
||||
@@ -1,17 +1,13 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
CreateReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { CreateReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function createReceiverHandler(
|
||||
data: CreateReceiverApiResponse | ((info: Parameters<Parameters<typeof http.post>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.post(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers'), function handler(info) {
|
||||
return http.post(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,17 +1,13 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
DeleteReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { DeleteReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function deleteReceiverHandler(
|
||||
data: DeleteReceiverApiResponse | ((info: Parameters<Parameters<typeof http.delete>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.delete(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers/:name'), function handler(info) {
|
||||
return http.delete(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers/:name'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,17 +1,13 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
DeletecollectionReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { DeletecollectionReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function deletecollectionReceiverHandler(
|
||||
data: DeletecollectionReceiverApiResponse | ((info: Parameters<Parameters<typeof http.delete>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.delete(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers'), function handler(info) {
|
||||
return http.delete(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,17 +1,13 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
GetReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { GetReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function getReceiverHandler(
|
||||
data: GetReceiverApiResponse | ((info: Parameters<Parameters<typeof http.get>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.get(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers/:name'), function handler(info) {
|
||||
return http.get(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers/:name'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import { API_GROUP, API_VERSION } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
import { EnhancedListReceiverApiResponse } from '../../../types';
|
||||
|
||||
export function listReceiverHandler(
|
||||
data: EnhancedListReceiverApiResponse | ((info: Parameters<Parameters<typeof http.get>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.get(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers'), function handler(info) {
|
||||
return http.get(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,17 +1,13 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
ReplaceReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { ReplaceReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function replaceReceiverHandler(
|
||||
data: ReplaceReceiverApiResponse | ((info: Parameters<Parameters<typeof http.put>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.put(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers/:name'), function handler(info) {
|
||||
return http.put(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers/:name'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,17 +1,13 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
UpdateReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { UpdateReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function updateReceiverHandler(
|
||||
data: UpdateReceiverApiResponse | ((info: Parameters<Parameters<typeof http.patch>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.patch(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers/:name'), function handler(info) {
|
||||
return http.patch(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers/:name'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { api } from './baseAPI';
|
||||
import { api } from './api';
|
||||
export const addTagTypes = ['API Discovery', 'Receiver', 'RoutingTree', 'TemplateGroup', 'TimeInterval'] as const;
|
||||
const injectedRtkApi = api
|
||||
.enhanceEndpoints({
|
||||
@@ -7,7 +7,7 @@ const injectedRtkApi = api
|
||||
.injectEndpoints({
|
||||
endpoints: (build) => ({
|
||||
getApiResources: build.query<GetApiResourcesApiResponse, GetApiResourcesApiArg>({
|
||||
query: () => ({ url: `/` }),
|
||||
query: () => ({ url: `/apis/notifications.alerting.grafana.app/v0alpha1/` }),
|
||||
providesTags: ['API Discovery'],
|
||||
}),
|
||||
listReceiver: build.query<ListReceiverApiResponse, ListReceiverApiArg>({
|
||||
@@ -119,6 +119,44 @@ const injectedRtkApi = api
|
||||
}),
|
||||
invalidatesTags: ['Receiver'],
|
||||
}),
|
||||
getReceiverStatus: build.query<GetReceiverStatusApiResponse, GetReceiverStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/receivers/${queryArg.name}/status`,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
},
|
||||
}),
|
||||
providesTags: ['Receiver'],
|
||||
}),
|
||||
replaceReceiverStatus: build.mutation<ReplaceReceiverStatusApiResponse, ReplaceReceiverStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/receivers/${queryArg.name}/status`,
|
||||
method: 'PUT',
|
||||
body: queryArg.receiver,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['Receiver'],
|
||||
}),
|
||||
updateReceiverStatus: build.mutation<UpdateReceiverStatusApiResponse, UpdateReceiverStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/receivers/${queryArg.name}/status`,
|
||||
method: 'PATCH',
|
||||
body: queryArg.patch,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
force: queryArg.force,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['Receiver'],
|
||||
}),
|
||||
listRoutingTree: build.query<ListRoutingTreeApiResponse, ListRoutingTreeApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/routingtrees`,
|
||||
@@ -231,6 +269,44 @@ const injectedRtkApi = api
|
||||
}),
|
||||
invalidatesTags: ['RoutingTree'],
|
||||
}),
|
||||
getRoutingTreeStatus: build.query<GetRoutingTreeStatusApiResponse, GetRoutingTreeStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/routingtrees/${queryArg.name}/status`,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
},
|
||||
}),
|
||||
providesTags: ['RoutingTree'],
|
||||
}),
|
||||
replaceRoutingTreeStatus: build.mutation<ReplaceRoutingTreeStatusApiResponse, ReplaceRoutingTreeStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/routingtrees/${queryArg.name}/status`,
|
||||
method: 'PUT',
|
||||
body: queryArg.routingTree,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['RoutingTree'],
|
||||
}),
|
||||
updateRoutingTreeStatus: build.mutation<UpdateRoutingTreeStatusApiResponse, UpdateRoutingTreeStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/routingtrees/${queryArg.name}/status`,
|
||||
method: 'PATCH',
|
||||
body: queryArg.patch,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
force: queryArg.force,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['RoutingTree'],
|
||||
}),
|
||||
listTemplateGroup: build.query<ListTemplateGroupApiResponse, ListTemplateGroupApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/templategroups`,
|
||||
@@ -343,6 +419,47 @@ const injectedRtkApi = api
|
||||
}),
|
||||
invalidatesTags: ['TemplateGroup'],
|
||||
}),
|
||||
getTemplateGroupStatus: build.query<GetTemplateGroupStatusApiResponse, GetTemplateGroupStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/templategroups/${queryArg.name}/status`,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
},
|
||||
}),
|
||||
providesTags: ['TemplateGroup'],
|
||||
}),
|
||||
replaceTemplateGroupStatus: build.mutation<
|
||||
ReplaceTemplateGroupStatusApiResponse,
|
||||
ReplaceTemplateGroupStatusApiArg
|
||||
>({
|
||||
query: (queryArg) => ({
|
||||
url: `/templategroups/${queryArg.name}/status`,
|
||||
method: 'PUT',
|
||||
body: queryArg.templateGroup,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['TemplateGroup'],
|
||||
}),
|
||||
updateTemplateGroupStatus: build.mutation<UpdateTemplateGroupStatusApiResponse, UpdateTemplateGroupStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/templategroups/${queryArg.name}/status`,
|
||||
method: 'PATCH',
|
||||
body: queryArg.patch,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
force: queryArg.force,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['TemplateGroup'],
|
||||
}),
|
||||
listTimeInterval: build.query<ListTimeIntervalApiResponse, ListTimeIntervalApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/timeintervals`,
|
||||
@@ -455,10 +572,48 @@ const injectedRtkApi = api
|
||||
}),
|
||||
invalidatesTags: ['TimeInterval'],
|
||||
}),
|
||||
getTimeIntervalStatus: build.query<GetTimeIntervalStatusApiResponse, GetTimeIntervalStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/timeintervals/${queryArg.name}/status`,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
},
|
||||
}),
|
||||
providesTags: ['TimeInterval'],
|
||||
}),
|
||||
replaceTimeIntervalStatus: build.mutation<ReplaceTimeIntervalStatusApiResponse, ReplaceTimeIntervalStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/timeintervals/${queryArg.name}/status`,
|
||||
method: 'PUT',
|
||||
body: queryArg.timeInterval,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['TimeInterval'],
|
||||
}),
|
||||
updateTimeIntervalStatus: build.mutation<UpdateTimeIntervalStatusApiResponse, UpdateTimeIntervalStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/timeintervals/${queryArg.name}/status`,
|
||||
method: 'PATCH',
|
||||
body: queryArg.patch,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
force: queryArg.force,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['TimeInterval'],
|
||||
}),
|
||||
}),
|
||||
overrideExisting: false,
|
||||
});
|
||||
export { injectedRtkApi as generatedAPI };
|
||||
export { injectedRtkApi as notificationsAPI };
|
||||
export type GetApiResourcesApiResponse = /** status 200 OK */ ApiResourceList;
|
||||
export type GetApiResourcesApiArg = void;
|
||||
export type ListReceiverApiResponse = /** status 200 OK */ ReceiverList;
|
||||
@@ -626,6 +781,43 @@ export type UpdateReceiverApiArg = {
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type GetReceiverStatusApiResponse = /** status 200 OK */ Receiver;
|
||||
export type GetReceiverStatusApiArg = {
|
||||
/** name of the Receiver */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
};
|
||||
export type ReplaceReceiverStatusApiResponse = /** status 200 OK */ Receiver | /** status 201 Created */ Receiver;
|
||||
export type ReplaceReceiverStatusApiArg = {
|
||||
/** name of the Receiver */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
receiver: Receiver;
|
||||
};
|
||||
export type UpdateReceiverStatusApiResponse = /** status 200 OK */ Receiver | /** status 201 Created */ Receiver;
|
||||
export type UpdateReceiverStatusApiArg = {
|
||||
/** name of the Receiver */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
/** Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. */
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type ListRoutingTreeApiResponse = /** status 200 OK */ RoutingTreeList;
|
||||
export type ListRoutingTreeApiArg = {
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
@@ -791,6 +983,47 @@ export type UpdateRoutingTreeApiArg = {
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type GetRoutingTreeStatusApiResponse = /** status 200 OK */ RoutingTree;
|
||||
export type GetRoutingTreeStatusApiArg = {
|
||||
/** name of the RoutingTree */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
};
|
||||
export type ReplaceRoutingTreeStatusApiResponse = /** status 200 OK */
|
||||
| RoutingTree
|
||||
| /** status 201 Created */ RoutingTree;
|
||||
export type ReplaceRoutingTreeStatusApiArg = {
|
||||
/** name of the RoutingTree */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
routingTree: RoutingTree;
|
||||
};
|
||||
export type UpdateRoutingTreeStatusApiResponse = /** status 200 OK */
|
||||
| RoutingTree
|
||||
| /** status 201 Created */ RoutingTree;
|
||||
export type UpdateRoutingTreeStatusApiArg = {
|
||||
/** name of the RoutingTree */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
/** Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. */
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type ListTemplateGroupApiResponse = /** status 200 OK */ TemplateGroupList;
|
||||
export type ListTemplateGroupApiArg = {
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
@@ -960,6 +1193,47 @@ export type UpdateTemplateGroupApiArg = {
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type GetTemplateGroupStatusApiResponse = /** status 200 OK */ TemplateGroup;
|
||||
export type GetTemplateGroupStatusApiArg = {
|
||||
/** name of the TemplateGroup */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
};
|
||||
export type ReplaceTemplateGroupStatusApiResponse = /** status 200 OK */
|
||||
| TemplateGroup
|
||||
| /** status 201 Created */ TemplateGroup;
|
||||
export type ReplaceTemplateGroupStatusApiArg = {
|
||||
/** name of the TemplateGroup */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
templateGroup: TemplateGroup;
|
||||
};
|
||||
export type UpdateTemplateGroupStatusApiResponse = /** status 200 OK */
|
||||
| TemplateGroup
|
||||
| /** status 201 Created */ TemplateGroup;
|
||||
export type UpdateTemplateGroupStatusApiArg = {
|
||||
/** name of the TemplateGroup */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
/** Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. */
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type ListTimeIntervalApiResponse = /** status 200 OK */ TimeIntervalList;
|
||||
export type ListTimeIntervalApiArg = {
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
@@ -1125,6 +1399,47 @@ export type UpdateTimeIntervalApiArg = {
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type GetTimeIntervalStatusApiResponse = /** status 200 OK */ TimeInterval;
|
||||
export type GetTimeIntervalStatusApiArg = {
|
||||
/** name of the TimeInterval */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
};
|
||||
export type ReplaceTimeIntervalStatusApiResponse = /** status 200 OK */
|
||||
| TimeInterval
|
||||
| /** status 201 Created */ TimeInterval;
|
||||
export type ReplaceTimeIntervalStatusApiArg = {
|
||||
/** name of the TimeInterval */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
timeInterval: TimeInterval;
|
||||
};
|
||||
export type UpdateTimeIntervalStatusApiResponse = /** status 200 OK */
|
||||
| TimeInterval
|
||||
| /** status 201 Created */ TimeInterval;
|
||||
export type UpdateTimeIntervalStatusApiArg = {
|
||||
/** name of the TimeInterval */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
/** Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. */
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type ApiResource = {
|
||||
/** categories is a list of the grouped resources this resource belongs to (e.g. 'all') */
|
||||
categories?: string[];
|
||||
@@ -1257,6 +1572,34 @@ export type ReceiverSpec = {
|
||||
integrations: ReceiverIntegration[];
|
||||
title: string;
|
||||
};
|
||||
export type ReceiverOperatorState = {
|
||||
/** descriptiveState is an optional more descriptive state field which has no requirements on format */
|
||||
descriptiveState?: string;
|
||||
/** details contains any extra information that is operator-specific */
|
||||
details?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** lastEvaluation is the ResourceVersion last evaluated */
|
||||
lastEvaluation: string;
|
||||
/** state describes the state of the lastEvaluation.
|
||||
It is limited to three possible states for machine evaluation. */
|
||||
state: 'success' | 'in_progress' | 'failed';
|
||||
};
|
||||
export type ReceiverStatus = {
|
||||
/** additionalFields is reserved for future use */
|
||||
additionalFields?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** operatorStates is a map of operator ID to operator state evaluations.
|
||||
Any operator which consumes this kind SHOULD add its state evaluation information to this field. */
|
||||
operatorStates?: {
|
||||
[key: string]: ReceiverOperatorState;
|
||||
};
|
||||
};
|
||||
export type Receiver = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
apiVersion: string;
|
||||
@@ -1264,6 +1607,7 @@ export type Receiver = {
|
||||
kind: string;
|
||||
metadata: ObjectMeta;
|
||||
spec: ReceiverSpec;
|
||||
status?: ReceiverStatus;
|
||||
};
|
||||
export type ListMeta = {
|
||||
/** continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message. */
|
||||
@@ -1356,6 +1700,34 @@ export type RoutingTreeSpec = {
|
||||
defaults: RoutingTreeRouteDefaults;
|
||||
routes: RoutingTreeRoute[];
|
||||
};
|
||||
export type RoutingTreeOperatorState = {
|
||||
/** descriptiveState is an optional more descriptive state field which has no requirements on format */
|
||||
descriptiveState?: string;
|
||||
/** details contains any extra information that is operator-specific */
|
||||
details?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** lastEvaluation is the ResourceVersion last evaluated */
|
||||
lastEvaluation: string;
|
||||
/** state describes the state of the lastEvaluation.
|
||||
It is limited to three possible states for machine evaluation. */
|
||||
state: 'success' | 'in_progress' | 'failed';
|
||||
};
|
||||
export type RoutingTreeStatus = {
|
||||
/** additionalFields is reserved for future use */
|
||||
additionalFields?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** operatorStates is a map of operator ID to operator state evaluations.
|
||||
Any operator which consumes this kind SHOULD add its state evaluation information to this field. */
|
||||
operatorStates?: {
|
||||
[key: string]: RoutingTreeOperatorState;
|
||||
};
|
||||
};
|
||||
export type RoutingTree = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
apiVersion: string;
|
||||
@@ -1363,6 +1735,7 @@ export type RoutingTree = {
|
||||
kind: string;
|
||||
metadata: ObjectMeta;
|
||||
spec: RoutingTreeSpec;
|
||||
status?: RoutingTreeStatus;
|
||||
};
|
||||
export type RoutingTreeList = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
@@ -1372,12 +1745,38 @@ export type RoutingTreeList = {
|
||||
kind?: string;
|
||||
metadata: ListMeta;
|
||||
};
|
||||
export type TemplateGroupTemplateKind = 'grafana' | 'mimir';
|
||||
export type TemplateGroupSpec = {
|
||||
content: string;
|
||||
kind: TemplateGroupTemplateKind;
|
||||
title: string;
|
||||
};
|
||||
export type TemplateGroupOperatorState = {
|
||||
/** descriptiveState is an optional more descriptive state field which has no requirements on format */
|
||||
descriptiveState?: string;
|
||||
/** details contains any extra information that is operator-specific */
|
||||
details?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** lastEvaluation is the ResourceVersion last evaluated */
|
||||
lastEvaluation: string;
|
||||
/** state describes the state of the lastEvaluation.
|
||||
It is limited to three possible states for machine evaluation. */
|
||||
state: 'success' | 'in_progress' | 'failed';
|
||||
};
|
||||
export type TemplateGroupStatus = {
|
||||
/** additionalFields is reserved for future use */
|
||||
additionalFields?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** operatorStates is a map of operator ID to operator state evaluations.
|
||||
Any operator which consumes this kind SHOULD add its state evaluation information to this field. */
|
||||
operatorStates?: {
|
||||
[key: string]: TemplateGroupOperatorState;
|
||||
};
|
||||
};
|
||||
export type TemplateGroup = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
apiVersion: string;
|
||||
@@ -1385,6 +1784,7 @@ export type TemplateGroup = {
|
||||
kind: string;
|
||||
metadata: ObjectMeta;
|
||||
spec: TemplateGroupSpec;
|
||||
status?: TemplateGroupStatus;
|
||||
};
|
||||
export type TemplateGroupList = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
@@ -1410,6 +1810,34 @@ export type TimeIntervalSpec = {
|
||||
name: string;
|
||||
time_intervals: TimeIntervalInterval[];
|
||||
};
|
||||
export type TimeIntervalOperatorState = {
|
||||
/** descriptiveState is an optional more descriptive state field which has no requirements on format */
|
||||
descriptiveState?: string;
|
||||
/** details contains any extra information that is operator-specific */
|
||||
details?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** lastEvaluation is the ResourceVersion last evaluated */
|
||||
lastEvaluation: string;
|
||||
/** state describes the state of the lastEvaluation.
|
||||
It is limited to three possible states for machine evaluation. */
|
||||
state: 'success' | 'in_progress' | 'failed';
|
||||
};
|
||||
export type TimeIntervalStatus = {
|
||||
/** additionalFields is reserved for future use */
|
||||
additionalFields?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** operatorStates is a map of operator ID to operator state evaluations.
|
||||
Any operator which consumes this kind SHOULD add its state evaluation information to this field. */
|
||||
operatorStates?: {
|
||||
[key: string]: TimeIntervalOperatorState;
|
||||
};
|
||||
};
|
||||
export type TimeInterval = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
apiVersion: string;
|
||||
@@ -1417,6 +1845,7 @@ export type TimeInterval = {
|
||||
kind: string;
|
||||
metadata: ObjectMeta;
|
||||
spec: TimeIntervalSpec;
|
||||
status?: TimeIntervalStatus;
|
||||
};
|
||||
export type TimeIntervalList = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
@@ -1426,43 +1855,3 @@ export type TimeIntervalList = {
|
||||
kind?: string;
|
||||
metadata: ListMeta;
|
||||
};
|
||||
export const {
|
||||
useGetApiResourcesQuery,
|
||||
useLazyGetApiResourcesQuery,
|
||||
useListReceiverQuery,
|
||||
useLazyListReceiverQuery,
|
||||
useCreateReceiverMutation,
|
||||
useDeletecollectionReceiverMutation,
|
||||
useGetReceiverQuery,
|
||||
useLazyGetReceiverQuery,
|
||||
useReplaceReceiverMutation,
|
||||
useDeleteReceiverMutation,
|
||||
useUpdateReceiverMutation,
|
||||
useListRoutingTreeQuery,
|
||||
useLazyListRoutingTreeQuery,
|
||||
useCreateRoutingTreeMutation,
|
||||
useDeletecollectionRoutingTreeMutation,
|
||||
useGetRoutingTreeQuery,
|
||||
useLazyGetRoutingTreeQuery,
|
||||
useReplaceRoutingTreeMutation,
|
||||
useDeleteRoutingTreeMutation,
|
||||
useUpdateRoutingTreeMutation,
|
||||
useListTemplateGroupQuery,
|
||||
useLazyListTemplateGroupQuery,
|
||||
useCreateTemplateGroupMutation,
|
||||
useDeletecollectionTemplateGroupMutation,
|
||||
useGetTemplateGroupQuery,
|
||||
useLazyGetTemplateGroupQuery,
|
||||
useReplaceTemplateGroupMutation,
|
||||
useDeleteTemplateGroupMutation,
|
||||
useUpdateTemplateGroupMutation,
|
||||
useListTimeIntervalQuery,
|
||||
useLazyListTimeIntervalQuery,
|
||||
useCreateTimeIntervalMutation,
|
||||
useDeletecollectionTimeIntervalMutation,
|
||||
useGetTimeIntervalQuery,
|
||||
useLazyGetTimeIntervalQuery,
|
||||
useReplaceTimeIntervalMutation,
|
||||
useDeleteTimeIntervalMutation,
|
||||
useUpdateTimeIntervalMutation,
|
||||
} = injectedRtkApi;
|
||||
@@ -3,11 +3,7 @@
|
||||
*/
|
||||
import { MergeDeep, MergeExclusive, OverrideProperties } from 'type-fest';
|
||||
|
||||
import type {
|
||||
ListReceiverApiResponse,
|
||||
Receiver,
|
||||
ReceiverIntegration,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
import type { ListReceiverApiResponse, Receiver, ReceiverIntegration } from './notifications.api.gen';
|
||||
|
||||
type GenericIntegration = OverrideProperties<
|
||||
ReceiverIntegration,
|
||||
|
||||
@@ -0,0 +1,18 @@
|
||||
import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query/react';
|
||||
|
||||
import { getAPIBaseURL, getAPIReducerPath } from '../../util';
|
||||
|
||||
import { GROUP, VERSION } from './const';
|
||||
|
||||
const baseUrl = getAPIBaseURL(GROUP, VERSION);
|
||||
const reducerPath = getAPIReducerPath(GROUP, VERSION);
|
||||
|
||||
export const api = createApi({
|
||||
reducerPath,
|
||||
baseQuery: fetchBaseQuery({
|
||||
// Set URL correctly so MSW can intercept requests
|
||||
// https://mswjs.io/docs/runbook#rtk-query-requests-are-not-intercepted
|
||||
baseUrl: new URL(baseUrl, globalThis.location.origin).href,
|
||||
}),
|
||||
endpoints: () => ({}),
|
||||
});
|
||||
@@ -0,0 +1,2 @@
|
||||
export const VERSION = 'v0alpha1' as const;
|
||||
export const GROUP = 'rules.alerting.grafana.app' as const;
|
||||
@@ -1,4 +1,4 @@
|
||||
import { api } from './baseAPI';
|
||||
import { api } from './api';
|
||||
export const addTagTypes = ['API Discovery', 'AlertRule', 'RecordingRule'] as const;
|
||||
const injectedRtkApi = api
|
||||
.enhanceEndpoints({
|
||||
@@ -7,7 +7,7 @@ const injectedRtkApi = api
|
||||
.injectEndpoints({
|
||||
endpoints: (build) => ({
|
||||
getApiResources: build.query<GetApiResourcesApiResponse, GetApiResourcesApiArg>({
|
||||
query: () => ({ url: `/` }),
|
||||
query: () => ({ url: `/apis/rules.alerting.grafana.app/v0alpha1/` }),
|
||||
providesTags: ['API Discovery'],
|
||||
}),
|
||||
listAlertRule: build.query<ListAlertRuleApiResponse, ListAlertRuleApiArg>({
|
||||
@@ -313,7 +313,7 @@ const injectedRtkApi = api
|
||||
}),
|
||||
overrideExisting: false,
|
||||
});
|
||||
export { injectedRtkApi as generatedAPI };
|
||||
export { injectedRtkApi as rulesAPI };
|
||||
export type GetApiResourcesApiResponse = /** status 200 OK */ ApiResourceList;
|
||||
export type GetApiResourcesApiArg = void;
|
||||
export type ListAlertRuleApiResponse = /** status 200 OK */ AlertRuleList;
|
||||
@@ -1085,33 +1085,3 @@ export type RecordingRuleList = {
|
||||
kind?: string;
|
||||
metadata: ListMeta;
|
||||
};
|
||||
export const {
|
||||
useGetApiResourcesQuery,
|
||||
useLazyGetApiResourcesQuery,
|
||||
useListAlertRuleQuery,
|
||||
useLazyListAlertRuleQuery,
|
||||
useCreateAlertRuleMutation,
|
||||
useDeletecollectionAlertRuleMutation,
|
||||
useGetAlertRuleQuery,
|
||||
useLazyGetAlertRuleQuery,
|
||||
useReplaceAlertRuleMutation,
|
||||
useDeleteAlertRuleMutation,
|
||||
useUpdateAlertRuleMutation,
|
||||
useGetAlertRuleStatusQuery,
|
||||
useLazyGetAlertRuleStatusQuery,
|
||||
useReplaceAlertRuleStatusMutation,
|
||||
useUpdateAlertRuleStatusMutation,
|
||||
useListRecordingRuleQuery,
|
||||
useLazyListRecordingRuleQuery,
|
||||
useCreateRecordingRuleMutation,
|
||||
useDeletecollectionRecordingRuleMutation,
|
||||
useGetRecordingRuleQuery,
|
||||
useLazyGetRecordingRuleQuery,
|
||||
useReplaceRecordingRuleMutation,
|
||||
useDeleteRecordingRuleMutation,
|
||||
useUpdateRecordingRuleMutation,
|
||||
useGetRecordingRuleStatusQuery,
|
||||
useLazyGetRecordingRuleStatusQuery,
|
||||
useReplaceRecordingRuleStatusMutation,
|
||||
useUpdateRecordingRuleStatusMutation,
|
||||
} = injectedRtkApi;
|
||||
@@ -7,10 +7,9 @@ import { OverrideProperties } from 'type-fest';
|
||||
|
||||
import {
|
||||
CreateReceiverApiArg,
|
||||
ListReceiverApiArg,
|
||||
generatedAPI as notificationsAPIv0alpha1,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
type ListReceiverApiArg,
|
||||
notificationsAPI,
|
||||
} from '../../../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import type { ContactPoint, EnhancedListReceiverApiResponse } from '../../../api/notifications/v0alpha1/types';
|
||||
|
||||
// this is a workaround for the fact that the generated types are not narrow enough
|
||||
@@ -23,17 +22,17 @@ type ListContactPointsHookResult = TypedUseQueryHookResult<
|
||||
// Type for the options that can be passed to the hook
|
||||
// Based on the pattern used for mutation options in this file
|
||||
type ListContactPointsQueryArgs = Parameters<
|
||||
typeof notificationsAPIv0alpha1.endpoints.listReceiver.useQuery<ListContactPointsHookResult>
|
||||
typeof notificationsAPI.endpoints.listReceiver.useQuery<ListContactPointsHookResult>
|
||||
>[0];
|
||||
|
||||
type ListContactPointsQueryOptions = Parameters<
|
||||
typeof notificationsAPIv0alpha1.endpoints.listReceiver.useQuery<ListContactPointsHookResult>
|
||||
typeof notificationsAPI.endpoints.listReceiver.useQuery<ListContactPointsHookResult>
|
||||
>[1];
|
||||
|
||||
/**
|
||||
* useListContactPoints is a hook that fetches a list of contact points
|
||||
*
|
||||
* This function wraps the notificationsAPIv0alpha1.useListReceiverQuery with proper typing
|
||||
* This function wraps the notificationsAPI.useListReceiverQuery with proper typing
|
||||
* to ensure that the returned ContactPoints are correctly typed in the data.items array.
|
||||
*
|
||||
* It automatically uses the configured namespace for the query.
|
||||
@@ -44,8 +43,8 @@ type ListContactPointsQueryOptions = Parameters<
|
||||
export function useListContactPoints(
|
||||
queryArgs: ListContactPointsQueryArgs = {},
|
||||
queryOptions: ListContactPointsQueryOptions = {}
|
||||
): ListContactPointsHookResult {
|
||||
return notificationsAPIv0alpha1.useListReceiverQuery<ListContactPointsHookResult>(queryArgs, queryOptions);
|
||||
) {
|
||||
return notificationsAPI.useListReceiverQuery<ListContactPointsHookResult>(queryArgs, queryOptions);
|
||||
}
|
||||
|
||||
// type narrowing mutations requires us to define a few helper types
|
||||
@@ -61,7 +60,7 @@ type CreateContactPointMutation = TypedUseMutationResult<
|
||||
>;
|
||||
|
||||
type UseCreateContactPointOptions = Parameters<
|
||||
typeof notificationsAPIv0alpha1.endpoints.createReceiver.useMutation<CreateContactPointMutation>
|
||||
typeof notificationsAPI.endpoints.createReceiver.useMutation<CreateContactPointMutation>
|
||||
>[0];
|
||||
|
||||
/**
|
||||
@@ -70,16 +69,8 @@ type UseCreateContactPointOptions = Parameters<
|
||||
* This function wraps the notificationsAPI.useCreateReceiverMutation with proper typing
|
||||
* to ensure that the payload supports type narrowing.
|
||||
*/
|
||||
export function useCreateContactPoint(
|
||||
options?: UseCreateContactPointOptions
|
||||
): readonly [
|
||||
(
|
||||
args: CreateContactPointArgs
|
||||
) => ReturnType<ReturnType<typeof notificationsAPIv0alpha1.endpoints.createReceiver.useMutation>[0]>,
|
||||
ReturnType<typeof notificationsAPIv0alpha1.endpoints.createReceiver.useMutation<CreateContactPointMutation>>[1],
|
||||
] {
|
||||
const [updateFn, result] =
|
||||
notificationsAPIv0alpha1.endpoints.createReceiver.useMutation<CreateContactPointMutation>(options);
|
||||
export function useCreateContactPoint(options?: UseCreateContactPointOptions) {
|
||||
const [updateFn, result] = notificationsAPI.endpoints.createReceiver.useMutation<CreateContactPointMutation>(options);
|
||||
|
||||
const typedUpdateFn = (args: CreateContactPointArgs) => {
|
||||
// @ts-expect-error this one is just impossible for me to figure out
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { countBy, isEmpty } from 'lodash';
|
||||
|
||||
import { Receiver } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { Receiver } from '../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import { ContactPoint } from '../api/notifications/v0alpha1/types';
|
||||
|
||||
/**
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { RoutingTreeMatcher } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
import { RoutingTreeMatcher } from '../api/notifications/v0alpha1/notifications.api.gen';
|
||||
|
||||
export type Label = [string, string];
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { API_VERSION, RoutingTree } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { VERSION } from '../../api/notifications/v0alpha1/const';
|
||||
import { LabelMatcherFactory, RouteFactory } from '../../api/notifications/v0alpha1/mocks/fakes/Routes';
|
||||
import { RoutingTree } from '../../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import { Label } from '../../matchers/types';
|
||||
|
||||
import { matchInstancesToRouteTrees } from './useMatchPolicies';
|
||||
@@ -16,7 +16,7 @@ describe('matchInstancesToRouteTrees', () => {
|
||||
const trees: RoutingTree[] = [
|
||||
{
|
||||
kind: 'RoutingTree',
|
||||
apiVersion: API_VERSION,
|
||||
apiVersion: VERSION,
|
||||
metadata: { name: treeName },
|
||||
spec: {
|
||||
defaults: {
|
||||
@@ -24,6 +24,7 @@ describe('matchInstancesToRouteTrees', () => {
|
||||
},
|
||||
routes: [route],
|
||||
},
|
||||
status: {},
|
||||
},
|
||||
];
|
||||
|
||||
@@ -50,7 +51,7 @@ describe('matchInstancesToRouteTrees', () => {
|
||||
const trees: RoutingTree[] = [
|
||||
{
|
||||
kind: 'RoutingTree',
|
||||
apiVersion: API_VERSION,
|
||||
apiVersion: VERSION,
|
||||
metadata: { name: treeName },
|
||||
spec: {
|
||||
defaults: {
|
||||
@@ -58,6 +59,7 @@ describe('matchInstancesToRouteTrees', () => {
|
||||
},
|
||||
routes: [route],
|
||||
},
|
||||
status: {},
|
||||
},
|
||||
];
|
||||
|
||||
|
||||
@@ -1,10 +1,6 @@
|
||||
import { useCallback } from 'react';
|
||||
|
||||
import {
|
||||
RoutingTree,
|
||||
generatedAPI as notificationsAPIv0alpha1,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { RoutingTree, notificationsAPI } from '../../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import { Label } from '../../matchers/types';
|
||||
import { USER_DEFINED_TREE_NAME } from '../consts';
|
||||
import { Route, RouteWithID } from '../types';
|
||||
@@ -28,11 +24,6 @@ export type InstanceMatchResult = {
|
||||
matchedRoutes: RouteMatch[];
|
||||
};
|
||||
|
||||
interface UseMatchInstancesToRouteTreesReturnType
|
||||
extends ReturnType<typeof notificationsAPIv0alpha1.endpoints.listRoutingTree.useQuery> {
|
||||
matchInstancesToRouteTrees: (instances: Label[][]) => InstanceMatchResult[];
|
||||
}
|
||||
|
||||
/**
|
||||
* React hook that finds notification policy routes in all routing trees that match the provided set of alert instances.
|
||||
*
|
||||
@@ -44,8 +35,8 @@ interface UseMatchInstancesToRouteTreesReturnType
|
||||
* @returns An object containing a `matchInstancesToRoutingTrees` function that takes alert instances
|
||||
* and returns an array of InstanceMatchResult objects, each containing the matched routes and matching details
|
||||
*/
|
||||
export function useMatchInstancesToRouteTrees(): UseMatchInstancesToRouteTreesReturnType {
|
||||
const { data, ...rest } = notificationsAPIv0alpha1.endpoints.listRoutingTree.useQuery(
|
||||
export function useMatchInstancesToRouteTrees() {
|
||||
const { data, ...rest } = notificationsAPI.endpoints.listRoutingTree.useQuery(
|
||||
{},
|
||||
{
|
||||
refetchOnFocus: true,
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { OverrideProperties } from 'type-fest';
|
||||
|
||||
import { RoutingTreeRoute } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { RoutingTreeRoute } from '../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import { LabelMatcher } from '../matchers/types';
|
||||
|
||||
// type-narrow the route tree
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { groupBy, isArray, pick, reduce, uniqueId } from 'lodash';
|
||||
|
||||
import { RoutingTree, RoutingTreeRoute } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { RoutingTree, RoutingTreeRoute } from '../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import { Label } from '../matchers/types';
|
||||
import { LabelMatchDetails, matchLabels } from '../matchers/utils';
|
||||
|
||||
|
||||
@@ -19,5 +19,5 @@ export { type LabelMatcher, type Label } from './grafana/matchers/types';
|
||||
export { matchLabelsSet, matchLabels, isLabelMatch, type LabelMatchDetails } from './grafana/matchers/utils';
|
||||
|
||||
// API endpoints
|
||||
export { generatedAPI as notificationsAPIv0alpha1 } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
export { generatedAPI as rulesAPIv0alpha1 } from '@grafana/api-clients/rtkq/rules.alerting/v0alpha1';
|
||||
export { notificationsAPI as notificationsAPIv0alpha1 } from './grafana/api/notifications/v0alpha1/notifications.api.gen';
|
||||
export { rulesAPI as rulesAPIv0alpha1 } from './grafana/api/rules/v0alpha1/rules.api.gen';
|
||||
|
||||
@@ -2,25 +2,13 @@ import { configureStore } from '@reduxjs/toolkit';
|
||||
import { useEffect } from 'react';
|
||||
import { Provider } from 'react-redux';
|
||||
|
||||
import { MockBackendSrv } from '@grafana/api-clients';
|
||||
import { generatedAPI as notificationsAPIv0alpha1 } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
import { generatedAPI as rulesAPIv0alpha1 } from '@grafana/api-clients/rtkq/rules.alerting/v0alpha1';
|
||||
import { setBackendSrv } from '@grafana/runtime';
|
||||
|
||||
// Initialize BackendSrv for tests - this allows RTKQ to make HTTP requests
|
||||
// The actual HTTP requests will be intercepted by MSW (setupMockServer)
|
||||
// We only need to implement fetch() which is what RTKQ uses
|
||||
// we could remove this once @grafana/api-client no longer uses the BackendSrv
|
||||
// @ts-ignore
|
||||
setBackendSrv(new MockBackendSrv());
|
||||
import { notificationsAPIv0alpha1 } from '../src/unstable';
|
||||
|
||||
// create an empty store
|
||||
export const store: ReturnType<typeof configureStore> = configureStore({
|
||||
middleware: (getDefaultMiddleware) =>
|
||||
getDefaultMiddleware().concat(notificationsAPIv0alpha1.middleware).concat(rulesAPIv0alpha1.middleware),
|
||||
export const store = configureStore({
|
||||
middleware: (getDefaultMiddleware) => getDefaultMiddleware().concat(notificationsAPIv0alpha1.middleware),
|
||||
reducer: {
|
||||
[notificationsAPIv0alpha1.reducerPath]: notificationsAPIv0alpha1.reducer,
|
||||
[rulesAPIv0alpha1.reducerPath]: rulesAPIv0alpha1.reducer,
|
||||
},
|
||||
});
|
||||
|
||||
|
||||
@@ -13,14 +13,7 @@ import '@testing-library/jest-dom';
|
||||
* method which wraps the passed element in all of the necessary Providers,
|
||||
* so it can render correctly in the context of the application
|
||||
*/
|
||||
const customRender = (
|
||||
ui: React.ReactNode,
|
||||
renderOptions: RenderOptions = {}
|
||||
): {
|
||||
renderResult: ReturnType<typeof render>;
|
||||
user: ReturnType<typeof userEvent.setup>;
|
||||
store: typeof store;
|
||||
} => {
|
||||
const customRender = (ui: React.ReactNode, renderOptions: RenderOptions = {}) => {
|
||||
const user = userEvent.setup();
|
||||
const Providers = renderOptions.wrapper || getDefaultWrapper();
|
||||
|
||||
|
||||
@@ -116,18 +116,6 @@
|
||||
"import": "./dist/esm/clients/rtkq/shorturl/v1beta1/index.mjs",
|
||||
"require": "./dist/cjs/clients/rtkq/shorturl/v1beta1/index.cjs"
|
||||
},
|
||||
"./rtkq/notifications.alerting/v0alpha1": {
|
||||
"@grafana-app/source": "./src/clients/rtkq/notifications.alerting/v0alpha1/index.ts",
|
||||
"types": "./dist/types/clients/rtkq/notifications.alerting/v0alpha1/index.d.ts",
|
||||
"import": "./dist/esm/clients/rtkq/notifications.alerting/v0alpha1/index.mjs",
|
||||
"require": "./dist/cjs/clients/rtkq/notifications.alerting/v0alpha1/index.cjs"
|
||||
},
|
||||
"./rtkq/rules.alerting/v0alpha1": {
|
||||
"@grafana-app/source": "./src/clients/rtkq/rules.alerting/v0alpha1/index.ts",
|
||||
"types": "./dist/types/clients/rtkq/rules.alerting/v0alpha1/index.d.ts",
|
||||
"import": "./dist/esm/clients/rtkq/rules.alerting/v0alpha1/index.mjs",
|
||||
"require": "./dist/cjs/clients/rtkq/rules.alerting/v0alpha1/index.cjs"
|
||||
},
|
||||
"./rtkq/historian.alerting/v0alpha1": {
|
||||
"@grafana-app/source": "./src/clients/rtkq/historian.alerting/v0alpha1/index.ts",
|
||||
"types": "./dist/types/clients/rtkq/historian.alerting/v0alpha1/index.d.ts",
|
||||
|
||||
@@ -10,12 +10,10 @@ import { generatedAPI as historianAlertingAPIv0alpha1 } from './historian.alerti
|
||||
import { generatedAPI as iamAPIv0alpha1 } from './iam/v0alpha1';
|
||||
import { generatedAPI as logsdrilldownAPIv1alpha1 } from './logsdrilldown/v1alpha1';
|
||||
import { generatedAPI as migrateToCloudAPI } from './migrate-to-cloud';
|
||||
import { generatedAPI as notificationsAlertingAPIv0alpha1 } from './notifications.alerting/v0alpha1';
|
||||
import { generatedAPI as playlistAPIv0alpha1 } from './playlist/v0alpha1';
|
||||
import { generatedAPI as preferencesUserAPI } from './preferences/user';
|
||||
import { generatedAPI as preferencesAPIv1alpha1 } from './preferences/v1alpha1';
|
||||
import { generatedAPI as provisioningAPIv0alpha1 } from './provisioning/v0alpha1';
|
||||
import { generatedAPI as rulesAlertingAPIv0alpha1 } from './rules.alerting/v0alpha1';
|
||||
import { generatedAPI as shortURLAPIv1beta1 } from './shorturl/v1beta1';
|
||||
import { generatedAPI as legacyUserAPI } from './user';
|
||||
// PLOP_INJECT_IMPORT
|
||||
@@ -35,8 +33,6 @@ export const allMiddleware = [
|
||||
shortURLAPIv1beta1.middleware,
|
||||
correlationsAPIv0alpha1.middleware,
|
||||
legacyUserAPI.middleware,
|
||||
notificationsAlertingAPIv0alpha1.middleware,
|
||||
rulesAlertingAPIv0alpha1.middleware,
|
||||
historianAlertingAPIv0alpha1.middleware,
|
||||
logsdrilldownAPIv1alpha1.middleware,
|
||||
// PLOP_INJECT_MIDDLEWARE
|
||||
@@ -57,8 +53,6 @@ export const allReducers = {
|
||||
[shortURLAPIv1beta1.reducerPath]: shortURLAPIv1beta1.reducer,
|
||||
[correlationsAPIv0alpha1.reducerPath]: correlationsAPIv0alpha1.reducer,
|
||||
[legacyUserAPI.reducerPath]: legacyUserAPI.reducer,
|
||||
[notificationsAlertingAPIv0alpha1.reducerPath]: notificationsAlertingAPIv0alpha1.reducer,
|
||||
[rulesAlertingAPIv0alpha1.reducerPath]: rulesAlertingAPIv0alpha1.reducer,
|
||||
[historianAlertingAPIv0alpha1.reducerPath]: historianAlertingAPIv0alpha1.reducer,
|
||||
[logsdrilldownAPIv1alpha1.reducerPath]: logsdrilldownAPIv1alpha1.reducer,
|
||||
// PLOP_INJECT_REDUCER
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
import { createApi } from '@reduxjs/toolkit/query/react';
|
||||
|
||||
import { getAPIBaseURL } from '../../../../utils/utils';
|
||||
import { createBaseQuery } from '../../createBaseQuery';
|
||||
|
||||
export const API_GROUP = 'notifications.alerting.grafana.app' as const;
|
||||
export const API_VERSION = 'v0alpha1' as const;
|
||||
export const BASE_URL = getAPIBaseURL(API_GROUP, API_VERSION);
|
||||
|
||||
export const api = createApi({
|
||||
reducerPath: 'notificationsAlertingAPIv0alpha1',
|
||||
baseQuery: createBaseQuery({
|
||||
baseURL: BASE_URL,
|
||||
}),
|
||||
endpoints: () => ({}),
|
||||
});
|
||||
@@ -1,5 +0,0 @@
|
||||
export { BASE_URL, API_GROUP, API_VERSION } from './baseAPI';
|
||||
import { generatedAPI as rawAPI } from './endpoints.gen';
|
||||
|
||||
export * from './endpoints.gen';
|
||||
export const generatedAPI = rawAPI.enhanceEndpoints({});
|
||||
@@ -1,16 +0,0 @@
|
||||
import { createApi } from '@reduxjs/toolkit/query/react';
|
||||
|
||||
import { getAPIBaseURL } from '../../../../utils/utils';
|
||||
import { createBaseQuery } from '../../createBaseQuery';
|
||||
|
||||
export const API_GROUP = 'rules.alerting.grafana.app' as const;
|
||||
export const API_VERSION = 'v0alpha1' as const;
|
||||
export const BASE_URL = getAPIBaseURL(API_GROUP, API_VERSION);
|
||||
|
||||
export const api = createApi({
|
||||
reducerPath: 'rulesAlertingAPIv0alpha1',
|
||||
baseQuery: createBaseQuery({
|
||||
baseURL: BASE_URL,
|
||||
}),
|
||||
endpoints: () => ({}),
|
||||
});
|
||||
@@ -1,5 +0,0 @@
|
||||
export { BASE_URL, API_GROUP, API_VERSION } from './baseAPI';
|
||||
import { generatedAPI as rawAPI } from './endpoints.gen';
|
||||
|
||||
export * from './endpoints.gen';
|
||||
export const generatedAPI = rawAPI.enhanceEndpoints({});
|
||||
@@ -1,4 +1 @@
|
||||
export { getAPINamespace, getAPIBaseURL, normalizeError, handleRequestError } from './utils/utils';
|
||||
|
||||
/* @TODO figure out how to automatically set the MockBackendSrv when consumers of this package write tests using the exported clients */
|
||||
export { MockBackendSrv } from './utils/backendSrv.mock';
|
||||
|
||||
@@ -108,8 +108,6 @@ const config: ConfigFile = {
|
||||
...createAPIConfig('preferences', 'v1alpha1'),
|
||||
...createAPIConfig('provisioning', 'v0alpha1'),
|
||||
...createAPIConfig('shorturl', 'v1beta1'),
|
||||
...createAPIConfig('notifications.alerting', 'v0alpha1'),
|
||||
...createAPIConfig('rules.alerting', 'v0alpha1'),
|
||||
...createAPIConfig('historian.alerting', 'v0alpha1'),
|
||||
...createAPIConfig('logsdrilldown', 'v1alpha1'),
|
||||
// PLOP_INJECT_API_CLIENT - Used by the API client generator
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
import { Observable } from 'rxjs';
|
||||
import { fromFetch } from 'rxjs/fetch';
|
||||
|
||||
import { BackendSrv, BackendSrvRequest, FetchResponse } from '@grafana/runtime';
|
||||
|
||||
/**
|
||||
* Minimal mock implementation of BackendSrv for testing.
|
||||
* Only implements the fetch() method which is used by RTKQ.
|
||||
* HTTP requests are intercepted by MSW in tests.
|
||||
*/
|
||||
export class MockBackendSrv implements Partial<BackendSrv> {
|
||||
fetch<T>(options: BackendSrvRequest): Observable<FetchResponse<T>> {
|
||||
const init: RequestInit = {
|
||||
method: options.method || 'GET',
|
||||
headers: options.headers,
|
||||
body: options.data ? JSON.stringify(options.data) : undefined,
|
||||
credentials: options.credentials,
|
||||
signal: options.abortSignal,
|
||||
};
|
||||
|
||||
return new Observable((observer) => {
|
||||
fromFetch(options.url, init).subscribe({
|
||||
next: async (response) => {
|
||||
try {
|
||||
const data = await response.json();
|
||||
observer.next({
|
||||
data,
|
||||
status: response.status,
|
||||
statusText: response.statusText,
|
||||
ok: response.ok,
|
||||
headers: response.headers,
|
||||
redirected: response.redirected,
|
||||
type: response.type,
|
||||
url: response.url,
|
||||
config: options,
|
||||
});
|
||||
observer.complete();
|
||||
} catch (error) {
|
||||
observer.error(error);
|
||||
}
|
||||
},
|
||||
error: (error) => observer.error(error),
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1259,8 +1259,4 @@ export interface FeatureToggles {
|
||||
* Enables the ASAP smoothing transformation for time series data
|
||||
*/
|
||||
smoothingTransformation?: boolean;
|
||||
/**
|
||||
* Enables the creation of keepers that manage secrets stored on AWS secrets manager
|
||||
*/
|
||||
secretsManagementAppPlatformAwsKeeper?: boolean;
|
||||
}
|
||||
|
||||
1187
packages/grafana-prometheus/src/dashboards/grafana_stats.json
Normal file
1187
packages/grafana-prometheus/src/dashboards/grafana_stats.json
Normal file
File diff suppressed because it is too large
Load Diff
1353
packages/grafana-prometheus/src/dashboards/prometheus_2_stats.json
Normal file
1353
packages/grafana-prometheus/src/dashboards/prometheus_2_stats.json
Normal file
File diff suppressed because it is too large
Load Diff
834
packages/grafana-prometheus/src/dashboards/prometheus_stats.json
Normal file
834
packages/grafana-prometheus/src/dashboards/prometheus_stats.json
Normal file
@@ -0,0 +1,834 @@
|
||||
{
|
||||
"_comment": "Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/dashboards/prometheus_stats.json",
|
||||
"__inputs": [
|
||||
{
|
||||
"name": "DS_GDEV-PROMETHEUS",
|
||||
"label": "gdev-prometheus",
|
||||
"description": "",
|
||||
"type": "datasource",
|
||||
"pluginId": "prometheus",
|
||||
"pluginName": "Prometheus"
|
||||
}
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "8.1.0-pre"
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "prometheus",
|
||||
"name": "Prometheus",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "stat",
|
||||
"name": "Stat",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "text",
|
||||
"name": "Text",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "timeseries",
|
||||
"name": "Time series",
|
||||
"version": ""
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"iteration": 1624859749459,
|
||||
"links": [
|
||||
{
|
||||
"icon": "info",
|
||||
"tags": [],
|
||||
"targetBlank": true,
|
||||
"title": "Grafana Docs",
|
||||
"tooltip": "",
|
||||
"type": "link",
|
||||
"url": "https://grafana.com/docs/grafana/latest/"
|
||||
},
|
||||
{
|
||||
"icon": "info",
|
||||
"tags": [],
|
||||
"targetBlank": true,
|
||||
"title": "Prometheus Docs",
|
||||
"type": "link",
|
||||
"url": "http://prometheus.io/docs/introduction/overview/"
|
||||
}
|
||||
],
|
||||
"panels": [
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"decimals": 1,
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"text": "N/A"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
}
|
||||
],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "s"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 6,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 5,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"maxDataPoints": 100,
|
||||
"options": {
|
||||
"colorMode": "none",
|
||||
"graphMode": "none",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "(time() - process_start_time_seconds{job=\"prometheus\", instance=~\"$node\"})",
|
||||
"intervalFactor": 2,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Uptime",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"fixedColor": "rgb(31, 120, 193)",
|
||||
"mode": "fixed"
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "rgba(50, 172, 45, 0.97)",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "rgba(237, 129, 40, 0.89)",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"color": "rgba(245, 54, 54, 0.9)",
|
||||
"value": 5
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 6,
|
||||
"x": 6,
|
||||
"y": 0
|
||||
},
|
||||
"id": 6,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"maxDataPoints": 100,
|
||||
"options": {
|
||||
"colorMode": "none",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "prometheus_local_storage_memory_series{instance=~\"$node\"}",
|
||||
"intervalFactor": 2,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Local Storage Memory Series",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"0": {
|
||||
"text": "Empty"
|
||||
}
|
||||
},
|
||||
"type": "value"
|
||||
}
|
||||
],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "rgba(50, 172, 45, 0.97)",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "rgba(237, 129, 40, 0.89)",
|
||||
"value": 500
|
||||
},
|
||||
{
|
||||
"color": "rgba(245, 54, 54, 0.9)",
|
||||
"value": 4000
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 6,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 7,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"maxDataPoints": 100,
|
||||
"options": {
|
||||
"colorMode": "value",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "prometheus_local_storage_indexing_queue_length{instance=~\"$node\"}",
|
||||
"intervalFactor": 2,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Internal Storage Queue Length",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": null,
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 6,
|
||||
"x": 18,
|
||||
"y": 0
|
||||
},
|
||||
"id": 9,
|
||||
"links": [],
|
||||
"options": {
|
||||
"content": "<span style=\"font-family: 'Open Sans', 'Helvetica Neue', Helvetica; font-size: 25px;vertical-align: text-top;color: #bbbfc2;margin-left: 10px;\">Prometheus</span>\n\n<p style=\"margin-top: 10px;\">You're using Prometheus, an open-source systems monitoring and alerting toolkit originally built at SoundCloud. For more information, check out the <a href=\"https://grafana.com/\">Grafana</a> and <a href=\"http://prometheus.io/\">Prometheus</a> projects.</p>",
|
||||
"mode": "html"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"style": {},
|
||||
"transparent": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"links": [],
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "short"
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "prometheus"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "#C15C17",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "{instance=\"localhost:9090\",job=\"prometheus\"}"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "#C15C17",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 18,
|
||||
"x": 0,
|
||||
"y": 5
|
||||
},
|
||||
"id": 3,
|
||||
"links": [],
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(prometheus_local_storage_ingested_samples_total{instance=~\"$node\"}[5m])",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{job}}",
|
||||
"metric": "",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Samples ingested (rate-5m)",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": null,
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 4,
|
||||
"x": 18,
|
||||
"y": 5
|
||||
},
|
||||
"id": 8,
|
||||
"links": [],
|
||||
"options": {
|
||||
"content": "#### Samples Ingested\nThis graph displays the count of samples ingested by the Prometheus server, as measured over the last 5 minutes, per time series in the range vector. When troubleshooting an issue on IRC or GitHub, this is often the first stat requested by the Prometheus team. ",
|
||||
"mode": "markdown"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"style": {},
|
||||
"transparent": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"links": [],
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "short"
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "prometheus"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "#F9BA8F",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "{instance=\"localhost:9090\",interval=\"5s\",job=\"prometheus\"}"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "#F9BA8F",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 10,
|
||||
"x": 0,
|
||||
"y": 11
|
||||
},
|
||||
"id": 2,
|
||||
"links": [],
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(prometheus_target_interval_length_seconds_count{instance=~\"$node\"}[5m])",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{job}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Target Scrapes (last 5m)",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"links": [],
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "short"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 8,
|
||||
"x": 10,
|
||||
"y": 11
|
||||
},
|
||||
"id": 14,
|
||||
"links": [],
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "prometheus_target_interval_length_seconds{quantile!=\"0.01\", quantile!=\"0.05\",instance=~\"$node\"}",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{quantile}} ({{interval}})",
|
||||
"metric": "",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Scrape Duration",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": null,
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 6,
|
||||
"x": 18,
|
||||
"y": 11
|
||||
},
|
||||
"id": 11,
|
||||
"links": [],
|
||||
"options": {
|
||||
"content": "#### Scrapes\nPrometheus scrapes metrics from instrumented jobs, either directly or via an intermediary push gateway for short-lived jobs. Target scrapes will show how frequently targets are scraped, as measured over the last 5 minutes, per time series in the range vector. Scrape Duration will show how long the scrapes are taking, with percentiles available as series. ",
|
||||
"mode": "markdown"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"style": {},
|
||||
"transparent": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"links": [],
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "percentunit"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 18,
|
||||
"x": 0,
|
||||
"y": 18
|
||||
},
|
||||
"id": 12,
|
||||
"links": [],
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "prometheus_evaluator_duration_seconds{quantile!=\"0.01\", quantile!=\"0.05\",instance=~\"$node\"}",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{quantile}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Rule Eval Duration",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": null,
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 6,
|
||||
"x": 18,
|
||||
"y": 18
|
||||
},
|
||||
"id": 15,
|
||||
"links": [],
|
||||
"options": {
|
||||
"content": "#### Rule Evaluation Duration\nThis graph panel plots the duration for all evaluations to execute. The 50th percentile, 90th percentile and 99th percentile are shown as three separate series to help identify outliers that may be skewing the data.",
|
||||
"mode": "markdown"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"style": {},
|
||||
"transparent": true,
|
||||
"type": "text"
|
||||
}
|
||||
],
|
||||
"refresh": false,
|
||||
"revision": "1.0",
|
||||
"schemaVersion": 30,
|
||||
"tags": ["prometheus"],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"allValue": null,
|
||||
"current": {},
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"definition": "",
|
||||
"description": null,
|
||||
"error": null,
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
"label": "HOST:",
|
||||
"multi": false,
|
||||
"name": "node",
|
||||
"options": [],
|
||||
"query": {
|
||||
"query": "label_values(prometheus_build_info, instance)",
|
||||
"refId": "gdev-prometheus-node-Variable-Query"
|
||||
},
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"skipUrlSync": false,
|
||||
"sort": 1,
|
||||
"tagValuesQuery": "",
|
||||
"tagsQuery": "",
|
||||
"type": "query",
|
||||
"useTags": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-5m",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"now": true,
|
||||
"refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"]
|
||||
},
|
||||
"timezone": "browser",
|
||||
"title": "Prometheus Stats",
|
||||
"uid": "rpfmFFz7z",
|
||||
"version": 2
|
||||
}
|
||||
@@ -46,7 +46,6 @@ import (
|
||||
_ "sigs.k8s.io/randfill"
|
||||
_ "xorm.io/builder"
|
||||
|
||||
_ "github.com/aws/aws-sdk-go-v2/service/secretsmanager"
|
||||
_ "github.com/grafana/authlib/authn"
|
||||
_ "github.com/grafana/authlib/authz"
|
||||
_ "github.com/grafana/authlib/cache"
|
||||
|
||||
@@ -2,84 +2,54 @@ package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"sync"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
)
|
||||
|
||||
// loggerFactory is a function that creates a Logger given a name.
|
||||
// It can be set by calling SetLoggerFactory to use a custom logger implementation.
|
||||
var loggerFactory func(name string) Logger
|
||||
|
||||
// SetLoggerFactory sets the factory function used to create loggers.
|
||||
// This should be called during initialization to register a custom logger implementation.
|
||||
// If not set, a default slog-based logger will be used.
|
||||
func SetLoggerFactory(factory func(name string) Logger) {
|
||||
loggerFactory = factory
|
||||
}
|
||||
|
||||
var slogLogManager = &slogLoggerManager{
|
||||
cache: sync.Map{},
|
||||
}
|
||||
|
||||
func New(name string) Logger {
|
||||
if loggerFactory != nil {
|
||||
return loggerFactory(name)
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: log.New(name),
|
||||
}
|
||||
// add a caching layer since slog doesn't perform any caching itself
|
||||
return slogLogManager.getOrCreate(name)
|
||||
}
|
||||
|
||||
type slogLoggerManager struct {
|
||||
cache sync.Map
|
||||
type grafanaInfraLogWrapper struct {
|
||||
l *log.ConcreteLogger
|
||||
}
|
||||
|
||||
func (m *slogLoggerManager) getOrCreate(name string) Logger {
|
||||
if cached, ok := m.cache.Load(name); ok {
|
||||
return cached.(*slogLogger)
|
||||
}
|
||||
|
||||
logger := &slogLogger{
|
||||
logger: slog.Default().With("logger", name),
|
||||
name: name,
|
||||
}
|
||||
actual, _ := m.cache.LoadOrStore(name, logger)
|
||||
return actual.(*slogLogger)
|
||||
}
|
||||
|
||||
type slogLogger struct {
|
||||
logger *slog.Logger
|
||||
name string
|
||||
}
|
||||
|
||||
func (l *slogLogger) New(ctx ...any) Logger {
|
||||
func (d *grafanaInfraLogWrapper) New(ctx ...any) Logger {
|
||||
if len(ctx) == 0 {
|
||||
return &slogLogger{
|
||||
logger: l.logger,
|
||||
name: l.name,
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: d.l.New(),
|
||||
}
|
||||
}
|
||||
return &slogLogger{
|
||||
logger: l.logger.With(ctx...),
|
||||
name: l.name,
|
||||
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: d.l.New(ctx...),
|
||||
}
|
||||
}
|
||||
|
||||
func (l *slogLogger) Debug(msg string, ctx ...any) {
|
||||
l.logger.Debug(msg, ctx...)
|
||||
func (d *grafanaInfraLogWrapper) Debug(msg string, ctx ...any) {
|
||||
d.l.Debug(msg, ctx...)
|
||||
}
|
||||
|
||||
func (l *slogLogger) Info(msg string, ctx ...any) {
|
||||
l.logger.Info(msg, ctx...)
|
||||
func (d *grafanaInfraLogWrapper) Info(msg string, ctx ...any) {
|
||||
d.l.Info(msg, ctx...)
|
||||
}
|
||||
|
||||
func (l *slogLogger) Warn(msg string, ctx ...any) {
|
||||
l.logger.Warn(msg, ctx...)
|
||||
func (d *grafanaInfraLogWrapper) Warn(msg string, ctx ...any) {
|
||||
d.l.Warn(msg, ctx...)
|
||||
}
|
||||
|
||||
func (l *slogLogger) Error(msg string, ctx ...any) {
|
||||
l.logger.Error(msg, ctx...)
|
||||
func (d *grafanaInfraLogWrapper) Error(msg string, ctx ...any) {
|
||||
d.l.Error(msg, ctx...)
|
||||
}
|
||||
|
||||
func (l *slogLogger) FromContext(_ context.Context) Logger {
|
||||
return l
|
||||
func (d *grafanaInfraLogWrapper) FromContext(ctx context.Context) Logger {
|
||||
concreteInfraLogger, ok := d.l.FromContext(ctx).(*log.ConcreteLogger)
|
||||
if !ok {
|
||||
return d.New()
|
||||
}
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: concreteInfraLogger,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -170,13 +170,6 @@ func (s *DashboardStarsStorage) write(ctx context.Context, obj *collections.Star
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send an error if we try to save a non-dashboard star
|
||||
for _, res := range obj.Spec.Resource {
|
||||
if res.Group != "dashboard.grafana.app" || res.Kind != "Dashboard" {
|
||||
return nil, fmt.Errorf("only dashboard stars are supported until the migration to unified storage is complete")
|
||||
}
|
||||
}
|
||||
|
||||
user, err := s.users.GetByUID(ctx, &user.GetUserByUIDQuery{
|
||||
UID: owner.Identifier,
|
||||
})
|
||||
|
||||
@@ -276,7 +276,7 @@ func (b *APIBuilder) oneFlagHandler(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if b.providerType == setting.GOFFProviderType || b.providerType == setting.OFREPProviderType {
|
||||
if b.providerType == setting.GOFFProviderType {
|
||||
b.proxyFlagReq(ctx, flagKey, isAuthedReq, w, r)
|
||||
return
|
||||
}
|
||||
@@ -304,7 +304,7 @@ func (b *APIBuilder) allFlagsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
isAuthedReq := b.isAuthenticatedRequest(r)
|
||||
span.SetAttributes(attribute.Bool("authenticated", isAuthedReq))
|
||||
|
||||
if b.providerType == setting.GOFFProviderType || b.providerType == setting.OFREPProviderType {
|
||||
if b.providerType == setting.GOFFProviderType {
|
||||
b.proxyAllFlagReq(ctx, isAuthedReq, w, r)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -9,11 +9,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
)
|
||||
|
||||
const (
|
||||
// This constant can be used as a key in resource tags
|
||||
GrafanaSecretsManagerName = "grafana-secrets-manager"
|
||||
)
|
||||
|
||||
var (
|
||||
// The name used to refer to the system keeper
|
||||
SystemKeeperName = "system"
|
||||
@@ -107,8 +102,8 @@ func (s ExternalID) String() string {
|
||||
// Keeper is the interface for secret keepers.
|
||||
type Keeper interface {
|
||||
Store(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64, exposedValueOrRef string) (ExternalID, error)
|
||||
Update(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64, exposedValueOrRef string) error
|
||||
Expose(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64) (secretv1beta1.ExposedSecureValue, error)
|
||||
RetrieveReference(ctx context.Context, cfg secretv1beta1.KeeperConfig, ref string) (secretv1beta1.ExposedSecureValue, error)
|
||||
Delete(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64) error
|
||||
}
|
||||
|
||||
|
||||
@@ -21,10 +21,8 @@ type DecryptSecureValue struct {
|
||||
}
|
||||
|
||||
var (
|
||||
ErrSecureValueNotFound = errors.New("secure value not found")
|
||||
ErrSecureValueAlreadyExists = errors.New("secure value already exists")
|
||||
ErrReferenceWithSystemKeeper = errors.New("tried to create secure value using reference with system keeper, references can only be used with 3rd party keepers")
|
||||
ErrSecureValueMissingSecretAndRef = errors.New("secure value spec doesn't have neither a secret or reference")
|
||||
ErrSecureValueNotFound = errors.New("secure value not found")
|
||||
ErrSecureValueAlreadyExists = errors.New("secure value already exists")
|
||||
)
|
||||
|
||||
type ReadOpts struct {
|
||||
|
||||
@@ -103,12 +103,9 @@ func (w *Worker) Cleanup(ctx context.Context, sv *secretv1beta1.SecureValue) err
|
||||
return fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", sv.Namespace, sv.Status.Keeper, err)
|
||||
}
|
||||
|
||||
// If the secure value doesn't use a reference, delete the secret
|
||||
if sv.Spec.Ref == nil {
|
||||
// Keeper deletion is idempotent
|
||||
if err := keeper.Delete(ctx, keeperCfg, xkube.Namespace(sv.Namespace), sv.Name, sv.Status.Version); err != nil {
|
||||
return fmt.Errorf("deleting secure value from keeper: %w", err)
|
||||
}
|
||||
// Keeper deletion is idempotent
|
||||
if err := keeper.Delete(ctx, keeperCfg, xkube.Namespace(sv.Namespace), sv.Name, sv.Status.Version); err != nil {
|
||||
return fmt.Errorf("deleting secure value from keeper: %w", err)
|
||||
}
|
||||
|
||||
// Metadata deletion is not idempotent but not found errors are ignored
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package garbagecollectionworker_test
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -96,33 +97,27 @@ func TestBasic(t *testing.T) {
|
||||
require.NoError(t, sut.GarbageCollectionWorker.Cleanup(t.Context(), sv))
|
||||
require.NoError(t, sut.GarbageCollectionWorker.Cleanup(t.Context(), sv))
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("cleaning up secure values that use references", func(t *testing.T) {
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
keeper, err := sut.CreateAWSKeeper(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(keeper.Namespace), keeper.Name))
|
||||
|
||||
sv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(&secretv1beta1.SecureValue{
|
||||
var (
|
||||
decryptersGen = rapid.SampledFrom([]string{"svc1", "svc2", "svc3", "svc4", "svc5"})
|
||||
nameGen = rapid.SampledFrom([]string{"n1", "n2", "n3", "n4", "n5"})
|
||||
namespaceGen = rapid.SampledFrom([]string{"ns1", "ns2", "ns3", "ns4", "ns5"})
|
||||
anySecureValueGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.SecureValue {
|
||||
return &secretv1beta1.SecureValue{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: keeper.Namespace,
|
||||
Name: "sv1",
|
||||
Name: nameGen.Draw(t, "name"),
|
||||
Namespace: namespaceGen.Draw(t, "ns"),
|
||||
},
|
||||
Spec: secretv1beta1.SecureValueSpec{
|
||||
Description: "desc1",
|
||||
Ref: ptr.To("ref1"),
|
||||
Decrypters: []string{"decrypter1"},
|
||||
Description: rapid.SampledFrom([]string{"d1", "d2", "d3", "d4", "d5"}).Draw(t, "description"),
|
||||
Value: ptr.To(secretv1beta1.NewExposedSecureValue(rapid.SampledFrom([]string{"v1", "v2", "v3", "v4", "v5"}).Draw(t, "value"))),
|
||||
Decrypters: rapid.SliceOfDistinct(decryptersGen, func(v string) string { return v }).Draw(t, "decrypters"),
|
||||
},
|
||||
}))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = sut.DeleteSv(t.Context(), sv.Namespace, sv.Name)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, sut.GarbageCollectionWorker.Cleanup(t.Context(), sv))
|
||||
Status: secretv1beta1.SecureValueStatus{},
|
||||
}
|
||||
})
|
||||
}
|
||||
)
|
||||
|
||||
func TestProperty(t *testing.T) {
|
||||
t.Parallel()
|
||||
@@ -131,59 +126,26 @@ func TestProperty(t *testing.T) {
|
||||
|
||||
rapid.Check(t, func(t *rapid.T) {
|
||||
sut := testutils.Setup(tt)
|
||||
model := testutils.NewModelGsm(nil)
|
||||
model := newModel()
|
||||
|
||||
t.Repeat(map[string]func(*rapid.T){
|
||||
"create": func(t *rapid.T) {
|
||||
var sv *secretv1beta1.SecureValue
|
||||
if rapid.Bool().Draw(t, "withRef") {
|
||||
sv = testutils.AnySecureValueWithRefGen.Draw(t, "sv")
|
||||
} else {
|
||||
sv = testutils.AnySecureValueGen.Draw(t, "sv")
|
||||
}
|
||||
|
||||
sv := anySecureValueGen.Draw(t, "sv")
|
||||
svCopy := sv.DeepCopy()
|
||||
|
||||
createdSv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(sv))
|
||||
if err == nil {
|
||||
svCopy.UID = createdSv.UID
|
||||
}
|
||||
_, modelErr := model.Create(sut.Clock.Now(), svCopy)
|
||||
svCopy.UID = createdSv.UID
|
||||
modelErr := model.create(sut.Clock.Now(), svCopy)
|
||||
require.ErrorIs(t, err, modelErr)
|
||||
},
|
||||
"createKeeper": func(t *rapid.T) {
|
||||
input := testutils.AnyKeeperGen.Draw(t, "keeper")
|
||||
modelKeeper, modelErr := model.CreateKeeper(input)
|
||||
keeper, err := sut.KeeperMetadataStorage.Create(t.Context(), input, "actor-uid")
|
||||
if err != nil || modelErr != nil {
|
||||
require.ErrorIs(t, err, modelErr)
|
||||
return
|
||||
}
|
||||
require.Equal(t, modelKeeper.Name, keeper.Name)
|
||||
},
|
||||
"setKeeperAsActive": func(t *rapid.T) {
|
||||
namespace := testutils.NamespaceGen.Draw(t, "namespace")
|
||||
var keeper string
|
||||
if rapid.Bool().Draw(t, "systemKeeper") {
|
||||
keeper = contracts.SystemKeeperName
|
||||
} else {
|
||||
keeper = testutils.KeeperNameGen.Draw(t, "keeper")
|
||||
}
|
||||
modelErr := model.SetKeeperAsActive(namespace, keeper)
|
||||
err := sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(namespace), keeper)
|
||||
if err != nil || modelErr != nil {
|
||||
require.ErrorIs(t, err, modelErr)
|
||||
return
|
||||
}
|
||||
},
|
||||
"delete": func(t *rapid.T) {
|
||||
if len(model.SecureValues) == 0 {
|
||||
if len(model.items) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
i := rapid.IntRange(0, len(model.SecureValues)-1).Draw(t, "index")
|
||||
sv := model.SecureValues[i]
|
||||
_, modelErr := model.Delete(sv.Namespace, sv.Name)
|
||||
i := rapid.IntRange(0, len(model.items)-1).Draw(t, "index")
|
||||
sv := model.items[i]
|
||||
modelErr := model.delete(sv.Namespace, sv.Name)
|
||||
_, err := sut.DeleteSv(t.Context(), sv.Namespace, sv.Name)
|
||||
require.ErrorIs(t, err, modelErr)
|
||||
},
|
||||
@@ -191,7 +153,7 @@ func TestProperty(t *testing.T) {
|
||||
// Taken from secureValueMetadataStorage.acquireLeases
|
||||
minAge := 300 * time.Second
|
||||
maxBatchSize := sut.GarbageCollectionWorker.Cfg.SecretsManagement.GCWorkerMaxBatchSize
|
||||
modelDeleted, modelErr := model.CleanupInactiveSecureValues(sut.Clock.Now(), minAge, maxBatchSize)
|
||||
modelDeleted, modelErr := model.cleanupInactiveSecureValues(sut.Clock.Now(), minAge, maxBatchSize)
|
||||
deleted, err := sut.GarbageCollectionWorker.CleanupInactiveSecureValues(t.Context())
|
||||
require.ErrorIs(t, err, modelErr)
|
||||
|
||||
@@ -212,3 +174,77 @@ func TestProperty(t *testing.T) {
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
type model struct {
|
||||
items []*modelSecureValue
|
||||
}
|
||||
|
||||
type modelSecureValue struct {
|
||||
*secretv1beta1.SecureValue
|
||||
active bool
|
||||
created time.Time
|
||||
}
|
||||
|
||||
func newModel() *model {
|
||||
return &model{
|
||||
items: make([]*modelSecureValue, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *model) create(now time.Time, sv *secretv1beta1.SecureValue) error {
|
||||
created := now
|
||||
for _, item := range m.items {
|
||||
if item.active && item.Namespace == sv.Namespace && item.Name == sv.Name {
|
||||
item.active = false
|
||||
created = item.created
|
||||
break
|
||||
}
|
||||
}
|
||||
m.items = append(m.items, &modelSecureValue{SecureValue: sv, active: true, created: created})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *model) delete(ns string, name string) error {
|
||||
for _, sv := range m.items {
|
||||
if sv.active && sv.Namespace == ns && sv.Name == name {
|
||||
sv.active = false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return contracts.ErrSecureValueNotFound
|
||||
}
|
||||
|
||||
func (m *model) cleanupInactiveSecureValues(now time.Time, minAge time.Duration, maxBatchSize uint16) ([]*modelSecureValue, error) {
|
||||
// Using a slice to allow duplicates
|
||||
toDelete := make([]*modelSecureValue, 0)
|
||||
|
||||
// The implementation query sorts by created time ascending
|
||||
slices.SortFunc(m.items, func(a, b *modelSecureValue) int {
|
||||
if a.created.Before(b.created) {
|
||||
return -1
|
||||
} else if a.created.After(b.created) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
})
|
||||
|
||||
for _, sv := range m.items {
|
||||
if len(toDelete) >= int(maxBatchSize) {
|
||||
break
|
||||
}
|
||||
|
||||
if !sv.active && now.Sub(sv.created) > minAge {
|
||||
toDelete = append(toDelete, sv)
|
||||
}
|
||||
}
|
||||
|
||||
// PERF: The slices are always small
|
||||
m.items = slices.DeleteFunc(m.items, func(v1 *modelSecureValue) bool {
|
||||
return slices.ContainsFunc(toDelete, func(v2 *modelSecureValue) bool {
|
||||
return v2.UID == v1.UID
|
||||
})
|
||||
})
|
||||
|
||||
return toDelete, nil
|
||||
}
|
||||
|
||||
@@ -107,10 +107,6 @@ func (s *SQLKeeper) Expose(ctx context.Context, cfg secretv1beta1.KeeperConfig,
|
||||
return exposedValue, nil
|
||||
}
|
||||
|
||||
func (s *SQLKeeper) RetrieveReference(ctx context.Context, cfg secretv1beta1.KeeperConfig, ref string) (secretv1beta1.ExposedSecureValue, error) {
|
||||
return "", fmt.Errorf("reference is not implemented by the SQLKeeper")
|
||||
}
|
||||
|
||||
func (s *SQLKeeper) Delete(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64) error {
|
||||
ctx, span := s.tracer.Start(ctx, "SQLKeeper.Delete", trace.WithAttributes(
|
||||
attribute.String("namespace", namespace.String()),
|
||||
@@ -129,3 +125,27 @@ func (s *SQLKeeper) Delete(ctx context.Context, cfg secretv1beta1.KeeperConfig,
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SQLKeeper) Update(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64, exposedValueOrRef string) error {
|
||||
ctx, span := s.tracer.Start(ctx, "SQLKeeper.Update", trace.WithAttributes(
|
||||
attribute.String("namespace", namespace.String()),
|
||||
attribute.String("name", name),
|
||||
attribute.Int64("version", version),
|
||||
))
|
||||
defer span.End()
|
||||
|
||||
start := time.Now()
|
||||
encryptedData, err := s.encryptionManager.Encrypt(ctx, namespace, []byte(exposedValueOrRef))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to encrypt value: %w", err)
|
||||
}
|
||||
|
||||
err = s.store.Update(ctx, namespace, name, version, encryptedData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update encrypted value: %w", err)
|
||||
}
|
||||
|
||||
s.metrics.UpdateDuration.WithLabelValues(string(cfg.Type())).Observe(time.Since(start).Seconds())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func Test_SQLKeeperSetup(t *testing.T) {
|
||||
plaintext1 := "very secret string in namespace 1"
|
||||
plaintext2 := "very secret string in namespace 2"
|
||||
|
||||
keeperCfg := secretv1beta1.NewNamedKeeperConfig("k1", &secretv1beta1.SystemKeeperConfig{})
|
||||
keeperCfg := &secretv1beta1.SystemKeeperConfig{}
|
||||
|
||||
t.Run("storing an encrypted value returns no error", func(t *testing.T) {
|
||||
sut := testutils.Setup(t)
|
||||
@@ -123,6 +123,31 @@ func Test_SQLKeeperSetup(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("updating an existent encrypted value returns no error", func(t *testing.T) {
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
_, err := sut.SQLKeeper.Store(t.Context(), keeperCfg, namespace1, name1, version1, plaintext1)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = sut.SQLKeeper.Update(t.Context(), keeperCfg, namespace1, name1, version1, plaintext2)
|
||||
require.NoError(t, err)
|
||||
|
||||
exposedVal, err := sut.SQLKeeper.Expose(t.Context(), keeperCfg, namespace1, name1, version1)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, exposedVal)
|
||||
assert.Equal(t, plaintext2, exposedVal.DangerouslyExposeAndConsumeValue())
|
||||
})
|
||||
|
||||
t.Run("updating a non existent encrypted value returns error", func(t *testing.T) {
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
_, err := sut.SQLKeeper.Store(t.Context(), keeperCfg, namespace1, name1, version1, plaintext1)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = sut.SQLKeeper.Update(t.Context(), nil, namespace1, "non_existing_name", version1, plaintext2)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("data key migration only runs if both secrets db migrations are enabled", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
@@ -141,7 +141,7 @@ func (s *SecureValueService) Update(ctx context.Context, newSecureValue *secretv
|
||||
return nil, false, fmt.Errorf("fetching keeper config: namespace=%+v keeper: %q %w", newSecureValue.Namespace, currentVersion.Status.Keeper, err)
|
||||
}
|
||||
|
||||
if newSecureValue.Spec.Value == nil && newSecureValue.Spec.Ref == nil {
|
||||
if newSecureValue.Spec.Value == nil {
|
||||
keeper, err := s.keeperService.KeeperForConfig(keeperCfg)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", newSecureValue.Namespace, newSecureValue.Status.Keeper, err)
|
||||
@@ -150,7 +150,7 @@ func (s *SecureValueService) Update(ctx context.Context, newSecureValue *secretv
|
||||
|
||||
secret, err := keeper.Expose(ctx, keeperCfg, xkube.Namespace(newSecureValue.Namespace), newSecureValue.Name, currentVersion.Status.Version)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("reading secret value from keeper: %w %w", contracts.ErrSecureValueMissingSecretAndRef, err)
|
||||
return nil, false, fmt.Errorf("reading secret value from keeper: %w", err)
|
||||
}
|
||||
|
||||
newSecureValue.Spec.Value = &secret
|
||||
@@ -174,10 +174,6 @@ func (s *SecureValueService) createNewVersion(ctx context.Context, keeperName st
|
||||
return nil, contracts.NewErrValidateSecureValue(errorList)
|
||||
}
|
||||
|
||||
if sv.Spec.Ref != nil && keeperCfg.Type() == secretv1beta1.SystemKeeperType {
|
||||
return nil, contracts.ErrReferenceWithSystemKeeper
|
||||
}
|
||||
|
||||
createdSv, err := s.secureValueMetadataStorage.Create(ctx, keeperName, sv, actorUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating secure value: %w", err)
|
||||
@@ -193,28 +189,18 @@ func (s *SecureValueService) createNewVersion(ctx context.Context, keeperName st
|
||||
return nil, fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", createdSv.Namespace, keeperName, err)
|
||||
}
|
||||
logging.FromContext(ctx).Debug("retrieved keeper", "namespace", createdSv.Namespace, "type", keeperCfg.Type())
|
||||
|
||||
// TODO: can we stop using external id?
|
||||
// TODO: store uses only the namespace and returns and id. It could be a kv instead.
|
||||
// TODO: check that the encrypted store works with multiple versions
|
||||
switch {
|
||||
case sv.Spec.Value != nil:
|
||||
externalID, err := keeper.Store(ctx, keeperCfg, xkube.Namespace(createdSv.Namespace), createdSv.Name, createdSv.Status.Version, sv.Spec.Value.DangerouslyExposeAndConsumeValue())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storing secure value in keeper: %w", err)
|
||||
}
|
||||
createdSv.Status.ExternalID = string(externalID)
|
||||
externalID, err := keeper.Store(ctx, keeperCfg, xkube.Namespace(createdSv.Namespace), createdSv.Name, createdSv.Status.Version, sv.Spec.Value.DangerouslyExposeAndConsumeValue())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storing secure value in keeper: %w", err)
|
||||
}
|
||||
createdSv.Status.ExternalID = string(externalID)
|
||||
|
||||
if err := s.secureValueMetadataStorage.SetExternalID(ctx, xkube.Namespace(createdSv.Namespace), createdSv.Name, createdSv.Status.Version, externalID); err != nil {
|
||||
return nil, fmt.Errorf("setting secure value external id: %w", err)
|
||||
}
|
||||
|
||||
case sv.Spec.Ref != nil:
|
||||
// No-op, there's nothing to store in the keeper since the
|
||||
// secret is already stored in the 3rd party secret store
|
||||
// and it's being referenced.
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("secure value doesn't specify either a secret value or a reference")
|
||||
if err := s.secureValueMetadataStorage.SetExternalID(ctx, xkube.Namespace(createdSv.Namespace), createdSv.Name, createdSv.Status.Version, externalID); err != nil {
|
||||
return nil, fmt.Errorf("setting secure value external id: %w", err)
|
||||
}
|
||||
|
||||
if err := s.secureValueMetadataStorage.SetVersionToActive(ctx, xkube.Namespace(createdSv.Namespace), createdSv.Name, createdSv.Status.Version); err != nil {
|
||||
@@ -380,20 +366,3 @@ func (s *SecureValueService) Delete(ctx context.Context, namespace xkube.Namespa
|
||||
|
||||
return sv, nil
|
||||
}
|
||||
|
||||
func (s *SecureValueService) SetKeeperAsActive(ctx context.Context, namespace xkube.Namespace, name string) error {
|
||||
// The system keeper is not in the database, so skip checking it exists.
|
||||
// TODO: should the system keeper be in the database?
|
||||
if name != contracts.SystemKeeperName {
|
||||
// Check keeper exists. No need to worry about time of check to time of use
|
||||
// since trying to activate a just deleted keeper will result in all
|
||||
// keepers being inactive and defaulting to the system keeper.
|
||||
if _, err := s.keeperMetadataStorage.Read(ctx, namespace, name, contracts.ReadOpts{}); err != nil {
|
||||
return fmt.Errorf("reading keeper before setting as active: %w", err)
|
||||
}
|
||||
}
|
||||
if err := s.keeperMetadataStorage.SetAsActive(ctx, namespace, name); err != nil {
|
||||
return fmt.Errorf("calling keeper metadata storage to set keeper as active: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/testutils"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/xkube"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
@@ -94,149 +93,4 @@ func TestCrud(t *testing.T) {
|
||||
_, err = sut.SecureValueMetadataStorage.Read(t.Context(), xkube.Namespace(sv1.Namespace), sv1.Name, contracts.ReadOpts{})
|
||||
require.ErrorIs(t, err, contracts.ErrSecureValueNotFound)
|
||||
})
|
||||
|
||||
t.Run("secret can be referenced only when the active keeper is a 3rd party keeper", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
ref := "path-to-secret"
|
||||
sv := &secretv1beta1.SecureValue{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "sv1",
|
||||
Namespace: "ns1",
|
||||
},
|
||||
Spec: secretv1beta1.SecureValueSpec{
|
||||
Description: "desc1",
|
||||
Ref: &ref,
|
||||
Decrypters: []string{"decrypter1"},
|
||||
},
|
||||
Status: secretv1beta1.SecureValueStatus{},
|
||||
}
|
||||
|
||||
// Creating a secure value using ref with the system keeper
|
||||
createdSv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(sv))
|
||||
require.NotNil(t, err)
|
||||
require.Nil(t, createdSv)
|
||||
require.Contains(t, err.Error(), "tried to create secure value using reference with system keeper, references can only be used with 3rd party keepers")
|
||||
|
||||
// Create a 3rd party keeper
|
||||
keeper := &secretv1beta1.Keeper{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "k1",
|
||||
Namespace: "ns1",
|
||||
},
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Description: "desc",
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{
|
||||
Region: "us-east-1",
|
||||
AssumeRole: &secretv1beta1.KeeperAWSAssumeRole{
|
||||
AssumeRoleArn: "arn",
|
||||
ExternalID: "id",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create a 3rd party keeper
|
||||
_, err = sut.KeeperMetadataStorage.Create(t.Context(), keeper, "actor-uid")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set the new keeper as active
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(keeper.Namespace), keeper.Name))
|
||||
|
||||
// Create a secure value using a ref
|
||||
createdSv, err = sut.CreateSv(t.Context(), testutils.CreateSvWithSv(sv))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, keeper.Name, createdSv.Status.Keeper)
|
||||
})
|
||||
|
||||
t.Run("creating secure value with reference", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
// Create a keeper because references cannot be used with the system keeper
|
||||
keeper, err := sut.KeeperMetadataStorage.Create(t.Context(), &secretv1beta1.Keeper{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns",
|
||||
Name: "k1",
|
||||
},
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{},
|
||||
},
|
||||
}, "actor-uid")
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(keeper.Namespace), keeper.Name))
|
||||
|
||||
sv, err := sut.CreateSv(t.Context())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sv)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_SetAsActive(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("setting the system keeper as the active keeper", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
namespace := "ns"
|
||||
|
||||
// Create a new keeper
|
||||
keeper, err := sut.KeeperMetadataStorage.Create(t.Context(), &secretv1beta1.Keeper{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns",
|
||||
Name: "k1",
|
||||
},
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Description: "description",
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{},
|
||||
},
|
||||
}, "actor-uid")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set the new keeper as active
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(keeper.Namespace), keeper.Name))
|
||||
keeperName, _, err := sut.KeeperMetadataStorage.GetActiveKeeperConfig(t.Context(), namespace)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, keeper.Name, keeperName)
|
||||
|
||||
// Set the system keeper as active
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(namespace), contracts.SystemKeeperName))
|
||||
keeperName, _, err = sut.KeeperMetadataStorage.GetActiveKeeperConfig(t.Context(), namespace)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, contracts.SystemKeeperName, keeperName)
|
||||
})
|
||||
|
||||
t.Run("each namespace can have one active keeper", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
k1, err := sut.CreateKeeper(t.Context(), func(ckc *testutils.CreateKeeperConfig) {
|
||||
ckc.Keeper.Namespace = "ns1"
|
||||
ckc.Keeper.Name = "k1"
|
||||
})
|
||||
require.NoError(t, err)
|
||||
k2, err := sut.CreateKeeper(t.Context(), func(ckc *testutils.CreateKeeperConfig) {
|
||||
ckc.Keeper.Namespace = "ns2"
|
||||
ckc.Keeper.Name = "k2"
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(k1.Namespace), k1.Name))
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(k2.Namespace), k2.Name))
|
||||
|
||||
keeperName, _, err := sut.KeeperMetadataStorage.GetActiveKeeperConfig(t.Context(), k1.Namespace)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, k1.Name, keeperName)
|
||||
|
||||
keeperName, _, err = sut.KeeperMetadataStorage.GetActiveKeeperConfig(t.Context(), k2.Namespace)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, k2.Name, keeperName)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
package testutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
secretv1beta1 "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/ptr"
|
||||
"pgregory.net/rapid"
|
||||
)
|
||||
|
||||
var (
|
||||
DecryptersGen = rapid.SampledFrom([]string{"svc1", "svc2", "svc3", "svc4", "svc5"})
|
||||
SecureValueNameGen = rapid.SampledFrom([]string{"n1", "n2", "n3", "n4", "n5"})
|
||||
KeeperNameGen = rapid.SampledFrom([]string{"k1", "k2", "k3", "k4", "k5"})
|
||||
NamespaceGen = rapid.SampledFrom([]string{"ns1", "ns2", "ns3", "ns4", "ns5"})
|
||||
SecretsToRefGen = rapid.SampledFrom([]string{"ref1", "ref2", "ref3", "ref4", "ref5"})
|
||||
// Generator for secure values that specify a secret value
|
||||
AnySecureValueGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.SecureValue {
|
||||
return &secretv1beta1.SecureValue{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: SecureValueNameGen.Draw(t, "name"),
|
||||
Namespace: NamespaceGen.Draw(t, "ns"),
|
||||
},
|
||||
Spec: secretv1beta1.SecureValueSpec{
|
||||
Description: rapid.SampledFrom([]string{"d1", "d2", "d3", "d4", "d5"}).Draw(t, "description"),
|
||||
Value: ptr.To(secretv1beta1.NewExposedSecureValue(rapid.SampledFrom([]string{"v1", "v2", "v3", "v4", "v5"}).Draw(t, "value"))),
|
||||
Decrypters: rapid.SliceOfDistinct(DecryptersGen, func(v string) string { return v }).Draw(t, "decrypters"),
|
||||
},
|
||||
Status: secretv1beta1.SecureValueStatus{},
|
||||
}
|
||||
})
|
||||
// Generator for secure values that reference values from 3rd party stores
|
||||
AnySecureValueWithRefGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.SecureValue {
|
||||
return &secretv1beta1.SecureValue{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: SecureValueNameGen.Draw(t, "name"),
|
||||
Namespace: NamespaceGen.Draw(t, "ns"),
|
||||
},
|
||||
Spec: secretv1beta1.SecureValueSpec{
|
||||
Description: rapid.SampledFrom([]string{"d1", "d2", "d3", "d4", "d5"}).Draw(t, "description"),
|
||||
Ref: ptr.To(SecretsToRefGen.Draw(t, "ref")),
|
||||
Decrypters: rapid.SliceOfDistinct(DecryptersGen, func(v string) string { return v }).Draw(t, "decrypters"),
|
||||
},
|
||||
Status: secretv1beta1.SecureValueStatus{},
|
||||
}
|
||||
})
|
||||
UpdateSecureValueGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.SecureValue {
|
||||
sv := AnySecureValueGen.Draw(t, "sv")
|
||||
// Maybe update the secret value, maybe not
|
||||
if !rapid.Bool().Draw(t, "should_update_value") {
|
||||
sv.Spec.Value = nil
|
||||
}
|
||||
return sv
|
||||
})
|
||||
DecryptGen = rapid.Custom(func(t *rapid.T) DecryptInput {
|
||||
return DecryptInput{
|
||||
Namespace: NamespaceGen.Draw(t, "ns"),
|
||||
Name: SecureValueNameGen.Draw(t, "name"),
|
||||
Decrypter: DecryptersGen.Draw(t, "decrypter"),
|
||||
}
|
||||
})
|
||||
AnyKeeperGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.Keeper {
|
||||
spec := secretv1beta1.KeeperSpec{
|
||||
Description: rapid.String().Draw(t, "description"),
|
||||
}
|
||||
|
||||
keeperType := rapid.SampledFrom([]string{"isAwsKeeper", "isAzureKeeper", "isGcpKeeper", "isVaultKeeper"}).Draw(t, "keeperType")
|
||||
switch keeperType {
|
||||
case "isAwsKeeper":
|
||||
spec.Aws = &secretv1beta1.KeeperAWSConfig{}
|
||||
case "isAzureKeeper":
|
||||
spec.Azure = &secretv1beta1.KeeperAzureConfig{}
|
||||
case "isGcpKeeper":
|
||||
spec.Gcp = &secretv1beta1.KeeperGCPConfig{}
|
||||
case "isVaultKeeper":
|
||||
spec.HashiCorpVault = &secretv1beta1.KeeperHashiCorpConfig{}
|
||||
default:
|
||||
panic(fmt.Sprintf("unhandled keeper type '%+v', did you forget a switch case?", keeperType))
|
||||
}
|
||||
|
||||
return &secretv1beta1.Keeper{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: KeeperNameGen.Draw(t, "name"),
|
||||
Namespace: NamespaceGen.Draw(t, "ns"),
|
||||
},
|
||||
Spec: spec,
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
type DecryptInput struct {
|
||||
Namespace string
|
||||
Name string
|
||||
Decrypter string
|
||||
}
|
||||
@@ -1,321 +0,0 @@
|
||||
package testutils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
secretv1beta1 "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1"
|
||||
"github.com/grafana/grafana/apps/secret/pkg/decrypt"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/contracts"
|
||||
)
|
||||
|
||||
type ModelSecureValue struct {
|
||||
*secretv1beta1.SecureValue
|
||||
active bool
|
||||
created time.Time
|
||||
leaseCreated time.Time
|
||||
}
|
||||
|
||||
type ModelKeeper struct {
|
||||
namespace string
|
||||
name string
|
||||
active bool
|
||||
keeperType secretv1beta1.KeeperType
|
||||
}
|
||||
|
||||
// A simplified in memoruy model of the grafana secrets manager
|
||||
type ModelGsm struct {
|
||||
SecureValues []*ModelSecureValue
|
||||
Keepers []*ModelKeeper
|
||||
modelSecretsManager *ModelAWSSecretsManager
|
||||
}
|
||||
|
||||
func NewModelGsm(modelSecretsManager *ModelAWSSecretsManager) *ModelGsm {
|
||||
return &ModelGsm{modelSecretsManager: modelSecretsManager}
|
||||
}
|
||||
|
||||
func (m *ModelGsm) getNewVersionNumber(namespace, name string) int64 {
|
||||
latestVersion := int64(0)
|
||||
for _, sv := range m.SecureValues {
|
||||
if sv.Namespace == namespace && sv.Name == name {
|
||||
latestVersion = max(latestVersion, sv.Status.Version)
|
||||
}
|
||||
}
|
||||
return latestVersion + 1
|
||||
}
|
||||
|
||||
func (m *ModelGsm) SetVersionToActive(namespace, name string, version int64) {
|
||||
for _, sv := range m.SecureValues {
|
||||
if sv.Namespace == namespace && sv.Name == name {
|
||||
sv.active = sv.Status.Version == version
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModelGsm) SetVersionToInactive(namespace, name string, version int64) {
|
||||
for _, sv := range m.SecureValues {
|
||||
if sv.Namespace == namespace && sv.Name == name && sv.Status.Version == version {
|
||||
sv.active = false
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModelGsm) ReadActiveVersion(namespace, name string) *ModelSecureValue {
|
||||
for _, sv := range m.SecureValues {
|
||||
if sv.Namespace == namespace && sv.Name == name && sv.active {
|
||||
return sv
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) Create(now time.Time, sv *secretv1beta1.SecureValue) (*secretv1beta1.SecureValue, error) {
|
||||
keeper := m.getActiveKeeper(sv.Namespace)
|
||||
|
||||
if sv.Spec.Ref != nil && keeper.keeperType == secretv1beta1.SystemKeeperType {
|
||||
return nil, contracts.ErrReferenceWithSystemKeeper
|
||||
}
|
||||
|
||||
sv = sv.DeepCopy()
|
||||
|
||||
// Preserve the original creation time if this secure value already exists
|
||||
created := now
|
||||
if sv := m.ReadActiveVersion(sv.Namespace, sv.Name); sv != nil {
|
||||
created = sv.created
|
||||
}
|
||||
|
||||
modelSv := &ModelSecureValue{SecureValue: sv, active: false, created: created}
|
||||
modelSv.Status.Version = m.getNewVersionNumber(modelSv.Namespace, modelSv.Name)
|
||||
modelSv.Status.ExternalID = fmt.Sprintf("%d", modelSv.Status.Version)
|
||||
modelSv.Status.Keeper = keeper.name
|
||||
m.SecureValues = append(m.SecureValues, modelSv)
|
||||
m.SetVersionToActive(modelSv.Namespace, modelSv.Name, modelSv.Status.Version)
|
||||
return modelSv.SecureValue, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) getActiveKeeper(namespace string) *ModelKeeper {
|
||||
for _, k := range m.Keepers {
|
||||
if k.namespace == namespace && k.active {
|
||||
return k
|
||||
}
|
||||
}
|
||||
|
||||
// Default to the system keeper when there are no active keepers in the namespace
|
||||
return &ModelKeeper{
|
||||
namespace: namespace,
|
||||
name: contracts.SystemKeeperName,
|
||||
active: true,
|
||||
keeperType: secretv1beta1.SystemKeeperType,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModelGsm) keeperExists(namespace, name string) bool {
|
||||
return m.findKeeper(namespace, name) != nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) findKeeper(namespace, name string) *ModelKeeper {
|
||||
// The system keeper is not in the list of keepers
|
||||
if name == contracts.SystemKeeperName {
|
||||
return &ModelKeeper{namespace: namespace, name: contracts.SystemKeeperName, active: true, keeperType: secretv1beta1.SystemKeeperType}
|
||||
}
|
||||
for _, k := range m.Keepers {
|
||||
if k.namespace == namespace && k.name == name {
|
||||
return k
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) CreateKeeper(keeper *secretv1beta1.Keeper) (*secretv1beta1.Keeper, error) {
|
||||
if m.keeperExists(keeper.Namespace, keeper.Name) {
|
||||
return nil, contracts.ErrKeeperAlreadyExists
|
||||
}
|
||||
|
||||
var keeperType secretv1beta1.KeeperType
|
||||
switch {
|
||||
case keeper.Spec.Aws != nil:
|
||||
keeperType = secretv1beta1.AWSKeeperType
|
||||
case keeper.Spec.Gcp != nil:
|
||||
keeperType = secretv1beta1.GCPKeeperType
|
||||
case keeper.Spec.Azure != nil:
|
||||
keeperType = secretv1beta1.AzureKeeperType
|
||||
case keeper.Spec.HashiCorpVault != nil:
|
||||
keeperType = secretv1beta1.HashiCorpKeeperType
|
||||
default:
|
||||
keeperType = secretv1beta1.SystemKeeperType
|
||||
}
|
||||
|
||||
m.Keepers = append(m.Keepers, &ModelKeeper{namespace: keeper.Namespace, name: keeper.Name, keeperType: keeperType})
|
||||
|
||||
return keeper.DeepCopy(), nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) SetKeeperAsActive(namespace, keeperName string) error {
|
||||
// Set every other keeper in the namespace as inactive
|
||||
for _, k := range m.Keepers {
|
||||
if k.namespace == namespace {
|
||||
k.active = k.name == keeperName
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) Update(now time.Time, newSecureValue *secretv1beta1.SecureValue) (*secretv1beta1.SecureValue, bool, error) {
|
||||
sv := m.ReadActiveVersion(newSecureValue.Namespace, newSecureValue.Name)
|
||||
if sv == nil {
|
||||
return nil, false, contracts.ErrSecureValueNotFound
|
||||
}
|
||||
|
||||
// If the keeper doesn't exist, return an error
|
||||
if !m.keeperExists(sv.Namespace, sv.Status.Keeper) {
|
||||
return nil, false, contracts.ErrKeeperNotFound
|
||||
}
|
||||
|
||||
// If the payload doesn't contain a value and it's not using a reference, get the value from current version
|
||||
if newSecureValue.Spec.Value == nil && newSecureValue.Spec.Ref == nil {
|
||||
// Tried to update a secure value without providing a new value or a ref
|
||||
if sv.Spec.Value == nil {
|
||||
return nil, false, contracts.ErrSecureValueMissingSecretAndRef
|
||||
}
|
||||
newSecureValue.Spec.Value = sv.Spec.Value
|
||||
}
|
||||
|
||||
createdSv, err := m.Create(now, newSecureValue)
|
||||
|
||||
return createdSv, true, err
|
||||
}
|
||||
|
||||
func (m *ModelGsm) Delete(namespace, name string) (*secretv1beta1.SecureValue, error) {
|
||||
modelSv := m.ReadActiveVersion(namespace, name)
|
||||
if modelSv == nil {
|
||||
return nil, contracts.ErrSecureValueNotFound
|
||||
}
|
||||
m.SetVersionToInactive(namespace, name, modelSv.Status.Version)
|
||||
return modelSv.SecureValue, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) List(namespace string) (*secretv1beta1.SecureValueList, error) {
|
||||
out := make([]secretv1beta1.SecureValue, 0)
|
||||
|
||||
for _, v := range m.SecureValues {
|
||||
if v.Namespace == namespace && v.active {
|
||||
out = append(out, *v.SecureValue)
|
||||
}
|
||||
}
|
||||
|
||||
return &secretv1beta1.SecureValueList{Items: out}, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) Decrypt(ctx context.Context, decrypter, namespace, name string) (map[string]decrypt.DecryptResult, error) {
|
||||
for _, v := range m.SecureValues {
|
||||
if v.Namespace == namespace &&
|
||||
v.Name == name &&
|
||||
v.active {
|
||||
if slices.ContainsFunc(v.Spec.Decrypters, func(d string) bool { return d == decrypter }) {
|
||||
switch {
|
||||
// It's a secure value that specifies the secret
|
||||
case v.Spec.Value != nil:
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultValue(v.DeepCopy().Spec.Value),
|
||||
}, nil
|
||||
|
||||
// It's a secure value that references a secret on a 3rd party store
|
||||
case v.Spec.Ref != nil:
|
||||
keeper := m.findKeeper(v.Namespace, v.Status.Keeper)
|
||||
switch keeper.keeperType {
|
||||
case secretv1beta1.AWSKeeperType:
|
||||
exposedValue, err := m.modelSecretsManager.RetrieveReference(ctx, nil, *v.Spec.Ref)
|
||||
if err != nil {
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultErr(fmt.Errorf("%w: %w", contracts.ErrDecryptFailed, err)),
|
||||
}, nil
|
||||
}
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultValue(&exposedValue),
|
||||
}, nil
|
||||
|
||||
// Other keepers are not implemented so we default to the system keeper
|
||||
default:
|
||||
// The system keeper doesn't implement Reference so decryption always fails
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultErr(contracts.ErrDecryptFailed),
|
||||
}, nil
|
||||
}
|
||||
|
||||
default:
|
||||
panic("bug: secure value where Spec.Value and Spec.Ref are nil")
|
||||
}
|
||||
}
|
||||
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultErr(contracts.ErrDecryptNotAuthorized),
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultErr(contracts.ErrDecryptNotFound),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) Read(namespace, name string) (*secretv1beta1.SecureValue, error) {
|
||||
modelSv := m.ReadActiveVersion(namespace, name)
|
||||
if modelSv == nil {
|
||||
return nil, contracts.ErrSecureValueNotFound
|
||||
}
|
||||
return modelSv.SecureValue, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) LeaseInactiveSecureValues(now time.Time, minAge, leaseTTL time.Duration, maxBatchSize uint16) ([]*ModelSecureValue, error) {
|
||||
out := make([]*ModelSecureValue, 0)
|
||||
|
||||
for _, sv := range m.SecureValues {
|
||||
if len(out) >= int(maxBatchSize) {
|
||||
break
|
||||
}
|
||||
if !sv.active && now.Sub(sv.created) > minAge && now.Sub(sv.leaseCreated) > leaseTTL {
|
||||
sv.leaseCreated = now
|
||||
out = append(out, sv)
|
||||
}
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) CleanupInactiveSecureValues(now time.Time, minAge time.Duration, maxBatchSize uint16) ([]*ModelSecureValue, error) {
|
||||
// Using a slice to allow duplicates
|
||||
toDelete := make([]*ModelSecureValue, 0)
|
||||
|
||||
// The implementation query sorts by created time ascending
|
||||
slices.SortFunc(m.SecureValues, func(a, b *ModelSecureValue) int {
|
||||
if a.created.Before(b.created) {
|
||||
return -1
|
||||
} else if a.created.After(b.created) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
})
|
||||
|
||||
for _, sv := range m.SecureValues {
|
||||
if len(toDelete) >= int(maxBatchSize) {
|
||||
break
|
||||
}
|
||||
|
||||
if !sv.active && now.Sub(sv.created) > minAge {
|
||||
toDelete = append(toDelete, sv)
|
||||
}
|
||||
}
|
||||
|
||||
// PERF: The slices are always small
|
||||
m.SecureValues = slices.DeleteFunc(m.SecureValues, func(v1 *ModelSecureValue) bool {
|
||||
return slices.ContainsFunc(toDelete, func(v2 *ModelSecureValue) bool {
|
||||
return v2.UID == v1.UID
|
||||
})
|
||||
})
|
||||
|
||||
return toDelete, nil
|
||||
}
|
||||
@@ -2,7 +2,6 @@ package testutils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -144,8 +143,7 @@ func Setup(t *testing.T, opts ...func(*SetupConfig)) Sut {
|
||||
realMigrationExecutor, err := encryptionstorage.ProvideEncryptedValueMigrationExecutor(database, tracer, encryptedValueStorage, globalEncryptedValueStorage)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockAwsKeeper := NewModelSecretsManager()
|
||||
var keeperService contracts.KeeperService = newKeeperServiceWrapper(sqlKeeper, mockAwsKeeper)
|
||||
var keeperService contracts.KeeperService = newKeeperServiceWrapper(sqlKeeper)
|
||||
|
||||
if setupCfg.KeeperService != nil {
|
||||
keeperService = setupCfg.KeeperService
|
||||
@@ -192,7 +190,6 @@ func Setup(t *testing.T, opts ...func(*SetupConfig)) Sut {
|
||||
Clock: clock,
|
||||
KeeperService: keeperService,
|
||||
KeeperMetadataStorage: keeperMetadataStorage,
|
||||
ModelSecretsManager: mockAwsKeeper,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -215,8 +212,6 @@ type Sut struct {
|
||||
Clock *FakeClock
|
||||
KeeperService contracts.KeeperService
|
||||
KeeperMetadataStorage contracts.KeeperMetadataStorage
|
||||
// A mock of AWS secrets manager that implements contracts.Keeper
|
||||
ModelSecretsManager *ModelAWSSecretsManager
|
||||
}
|
||||
|
||||
type CreateSvConfig struct {
|
||||
@@ -265,54 +260,16 @@ func (s *Sut) DeleteSv(ctx context.Context, namespace, name string) (*secretv1be
|
||||
return sv, err
|
||||
}
|
||||
|
||||
type CreateKeeperConfig struct {
|
||||
// The default keeper payload. Mutate it to change which keeper ends up being created
|
||||
Keeper *secretv1beta1.Keeper
|
||||
}
|
||||
|
||||
func (s *Sut) CreateAWSKeeper(ctx context.Context) (*secretv1beta1.Keeper, error) {
|
||||
return s.CreateKeeper(ctx, func(cfg *CreateKeeperConfig) {
|
||||
cfg.Keeper.Spec = secretv1beta1.KeeperSpec{
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{},
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Sut) CreateKeeper(ctx context.Context, opts ...func(*CreateKeeperConfig)) (*secretv1beta1.Keeper, error) {
|
||||
cfg := CreateKeeperConfig{
|
||||
Keeper: &secretv1beta1.Keeper{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "sv1",
|
||||
Namespace: "ns1",
|
||||
},
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(&cfg)
|
||||
}
|
||||
|
||||
return s.KeeperMetadataStorage.Create(ctx, cfg.Keeper, "actor-uid")
|
||||
}
|
||||
|
||||
type keeperServiceWrapper struct {
|
||||
sqlKeeper *sqlkeeper.SQLKeeper
|
||||
awsKeeper *ModelAWSSecretsManager
|
||||
keeper contracts.Keeper
|
||||
}
|
||||
|
||||
func newKeeperServiceWrapper(sqlKeeper *sqlkeeper.SQLKeeper, awsKeeper *ModelAWSSecretsManager) *keeperServiceWrapper {
|
||||
return &keeperServiceWrapper{sqlKeeper: sqlKeeper, awsKeeper: awsKeeper}
|
||||
func newKeeperServiceWrapper(keeper contracts.Keeper) *keeperServiceWrapper {
|
||||
return &keeperServiceWrapper{keeper: keeper}
|
||||
}
|
||||
|
||||
func (wrapper *keeperServiceWrapper) KeeperForConfig(cfg secretv1beta1.KeeperConfig) (contracts.Keeper, error) {
|
||||
switch cfg.(type) {
|
||||
case *secretv1beta1.NamedKeeperConfig[*secretv1beta1.KeeperAWSConfig]:
|
||||
return wrapper.awsKeeper, nil
|
||||
default:
|
||||
return wrapper.sqlKeeper, nil
|
||||
}
|
||||
return wrapper.keeper, nil
|
||||
}
|
||||
|
||||
func CreateUserAuthContext(ctx context.Context, namespace string, permissions map[string][]string) context.Context {
|
||||
@@ -433,113 +390,3 @@ type NoopMigrationExecutor struct {
|
||||
func (e *NoopMigrationExecutor) Execute(ctx context.Context) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// A mock of AWS secrets manager, used for testing.
|
||||
type ModelAWSSecretsManager struct {
|
||||
secrets map[string]entry
|
||||
alreadyDeleted map[string]bool
|
||||
}
|
||||
|
||||
type entry struct {
|
||||
exposedValueOrRef string
|
||||
externalID string
|
||||
}
|
||||
|
||||
func NewModelSecretsManager() *ModelAWSSecretsManager {
|
||||
return &ModelAWSSecretsManager{
|
||||
secrets: make(map[string]entry),
|
||||
alreadyDeleted: make(map[string]bool),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) Store(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64, exposedValueOrRef string) (externalID contracts.ExternalID, err error) {
|
||||
if exposedValueOrRef == "" {
|
||||
return "", fmt.Errorf("failed to satisfy constraint: Member must have length greater than or equal to 1")
|
||||
}
|
||||
|
||||
versionID := buildVersionID(namespace, name, version)
|
||||
if e, ok := m.secrets[versionID]; ok {
|
||||
// Ignore duplicated requests
|
||||
if e.exposedValueOrRef == exposedValueOrRef {
|
||||
return contracts.ExternalID(e.externalID), nil
|
||||
}
|
||||
|
||||
// Tried to create a secret that already exists
|
||||
return "", fmt.Errorf("ResourceExistsException: The operation failed because the secret %+v already exists", versionID)
|
||||
}
|
||||
|
||||
// First time creating the secret
|
||||
entry := entry{
|
||||
exposedValueOrRef: exposedValueOrRef,
|
||||
externalID: "external-id",
|
||||
}
|
||||
m.secrets[versionID] = entry
|
||||
|
||||
return contracts.ExternalID(entry.externalID), nil
|
||||
}
|
||||
|
||||
// Used to simulate the creation of secrets in the 3rd party secret store
|
||||
func (m *ModelAWSSecretsManager) Create(name, value string) {
|
||||
m.secrets[name] = entry{
|
||||
exposedValueOrRef: value,
|
||||
externalID: fmt.Sprintf("external_id_%+v", value),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) Expose(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64) (exposedValue secretv1beta1.ExposedSecureValue, err error) {
|
||||
versionID := buildVersionID(namespace, name, version)
|
||||
|
||||
if m.deleted(versionID) {
|
||||
return "", fmt.Errorf("InvalidRequestException: You can't perform this operation on the secret because it was marked for deletion")
|
||||
}
|
||||
|
||||
entry, ok := m.secrets[versionID]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("ResourceNotFoundException: Secrets Manager can't find the specified secret")
|
||||
}
|
||||
|
||||
return secretv1beta1.ExposedSecureValue(entry.exposedValueOrRef), nil
|
||||
}
|
||||
|
||||
// TODO: this could be namespaced to make it more realistic
|
||||
func (m *ModelAWSSecretsManager) RetrieveReference(ctx context.Context, _ secretv1beta1.KeeperConfig, ref string) (secretv1beta1.ExposedSecureValue, error) {
|
||||
entry, ok := m.secrets[ref]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("ResourceNotFoundException: Secrets Manager can't find the specified secret")
|
||||
}
|
||||
return secretv1beta1.ExposedSecureValue(entry.exposedValueOrRef), nil
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) Delete(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64) (err error) {
|
||||
versionID := buildVersionID(namespace, name, version)
|
||||
|
||||
// Deleting a secret that existed at some point is idempotent
|
||||
if m.deleted(versionID) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the secret is being deleted for the first time
|
||||
if m.exists(versionID) {
|
||||
m.delete(versionID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) deleted(versionID string) bool {
|
||||
return m.alreadyDeleted[versionID]
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) exists(versionID string) bool {
|
||||
_, ok := m.secrets[versionID]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) delete(versionID string) {
|
||||
m.alreadyDeleted[versionID] = true
|
||||
delete(m.secrets, versionID)
|
||||
}
|
||||
|
||||
func buildVersionID(namespace xkube.Namespace, name string, version int64) string {
|
||||
return fmt.Sprintf("%s/%s/%d", namespace, name, version)
|
||||
}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
@@ -11,17 +9,14 @@ import (
|
||||
|
||||
secretv1beta1 "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/contracts"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
)
|
||||
|
||||
type keeperValidator struct {
|
||||
features featuremgmt.FeatureToggles
|
||||
}
|
||||
type keeperValidator struct{}
|
||||
|
||||
var _ contracts.KeeperValidator = &keeperValidator{}
|
||||
|
||||
func ProvideKeeperValidator(features featuremgmt.FeatureToggles) contracts.KeeperValidator {
|
||||
return &keeperValidator{features: features}
|
||||
func ProvideKeeperValidator() contracts.KeeperValidator {
|
||||
return &keeperValidator{}
|
||||
}
|
||||
|
||||
func (v *keeperValidator) Validate(keeper *secretv1beta1.Keeper, oldKeeper *secretv1beta1.Keeper, operation admission.Operation) field.ErrorList {
|
||||
@@ -62,110 +57,51 @@ func (v *keeperValidator) Validate(keeper *secretv1beta1.Keeper, oldKeeper *secr
|
||||
}
|
||||
|
||||
if keeper.Spec.Aws != nil {
|
||||
//nolint
|
||||
if !v.features.IsEnabled(context.Background(), featuremgmt.FlagSecretsManagementAppPlatformAwsKeeper) {
|
||||
errs = append(errs,
|
||||
field.Forbidden(field.NewPath("spec", "aws"),
|
||||
fmt.Sprintf("enable aws keeper feature toggle to create aws keepers: %s", featuremgmt.FlagSecretsManagementAppPlatformAwsKeeper)))
|
||||
} else {
|
||||
errs = append(errs, validateAws(keeper.Spec.Aws)...)
|
||||
if err := validateCredentialValue(field.NewPath("spec", "aws", "accessKeyID"), keeper.Spec.Aws.AccessKeyID); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
if err := validateCredentialValue(field.NewPath("spec", "aws", "secretAccessKey"), keeper.Spec.Aws.SecretAccessKey); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
if keeper.Spec.Azure != nil {
|
||||
errs = append(errs, validateAzure(keeper.Spec.Azure)...)
|
||||
if keeper.Spec.Azure.KeyVaultName == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "keyVaultName"), "a `keyVaultName` is required"))
|
||||
}
|
||||
|
||||
if keeper.Spec.Azure.TenantID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "tenantID"), "a `tenantID` is required"))
|
||||
}
|
||||
|
||||
if keeper.Spec.Azure.ClientID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "clientID"), "a `clientID` is required"))
|
||||
}
|
||||
|
||||
if err := validateCredentialValue(field.NewPath("spec", "azure", "clientSecret"), keeper.Spec.Azure.ClientSecret); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
if keeper.Spec.Gcp != nil {
|
||||
errs = append(errs, validateGcp(keeper.Spec.Gcp)...)
|
||||
if keeper.Spec.Gcp.ProjectID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "gcp", "projectID"), "a `projectID` is required"))
|
||||
}
|
||||
|
||||
if keeper.Spec.Gcp.CredentialsFile == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "gcp", "credentialsFile"), "a `credentialsFile` is required"))
|
||||
}
|
||||
}
|
||||
|
||||
if keeper.Spec.HashiCorpVault != nil {
|
||||
errs = append(errs, validateHashiCorpVault(keeper.Spec.HashiCorpVault)...)
|
||||
}
|
||||
if keeper.Spec.HashiCorpVault.Address == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "hashiCorpVault", "address"), "an `address` is required"))
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func validateAws(cfg *secretv1beta1.KeeperAWSConfig) field.ErrorList {
|
||||
errs := make(field.ErrorList, 0)
|
||||
|
||||
if cfg.Region == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "aws", "region"), "region must be present"))
|
||||
}
|
||||
|
||||
switch {
|
||||
case cfg.AccessKey == nil && cfg.AssumeRole == nil:
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "aws"), "one of `accessKey` or `assumeRole` must be present"))
|
||||
|
||||
case cfg.AccessKey != nil && cfg.AssumeRole != nil:
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "aws"), "only one of `accessKey` or `assumeRole` can be present"))
|
||||
|
||||
case cfg.AccessKey != nil:
|
||||
if err := validateCredentialValue(field.NewPath("spec", "aws", "accessKey", "accessKeyID"), cfg.AccessKey.AccessKeyID); err != nil {
|
||||
if err := validateCredentialValue(field.NewPath("spec", "hashiCorpVault", "token"), keeper.Spec.HashiCorpVault.Token); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
if err := validateCredentialValue(field.NewPath("spec", "aws", "accessKey", "secretAccessKey"), cfg.AccessKey.SecretAccessKey); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
case cfg.AssumeRole != nil:
|
||||
if cfg.AssumeRole.AssumeRoleArn == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "aws", "assumeRole", "assumeRoleArn"), "arn of the role to assume must be present"))
|
||||
}
|
||||
if cfg.AssumeRole.ExternalID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "aws", "assumeRole", "externalId"), "externalId must be present"))
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func validateAzure(cfg *secretv1beta1.KeeperAzureConfig) field.ErrorList {
|
||||
errs := make(field.ErrorList, 0)
|
||||
|
||||
if cfg.KeyVaultName == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "keyVaultName"), "a `keyVaultName` is required"))
|
||||
}
|
||||
|
||||
if cfg.TenantID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "tenantID"), "a `tenantID` is required"))
|
||||
}
|
||||
|
||||
if cfg.ClientID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "clientID"), "a `clientID` is required"))
|
||||
}
|
||||
|
||||
if err := validateCredentialValue(field.NewPath("spec", "azure", "clientSecret"), cfg.ClientSecret); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func validateGcp(cfg *secretv1beta1.KeeperGCPConfig) field.ErrorList {
|
||||
errs := make(field.ErrorList, 0)
|
||||
|
||||
if cfg.ProjectID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "gcp", "projectID"), "a `projectID` is required"))
|
||||
}
|
||||
|
||||
if cfg.CredentialsFile == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "gcp", "credentialsFile"), "a `credentialsFile` is required"))
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func validateHashiCorpVault(cfg *secretv1beta1.KeeperHashiCorpConfig) field.ErrorList {
|
||||
errs := make(field.ErrorList, 0)
|
||||
|
||||
if cfg.Address == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "hashiCorpVault", "address"), "an `address` is required"))
|
||||
}
|
||||
|
||||
if err := validateCredentialValue(field.NewPath("spec", "hashiCorpVault", "token"), cfg.Token); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
return errs
|
||||
|
||||
@@ -10,12 +10,11 @@ import (
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
secretv1beta1 "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
)
|
||||
|
||||
func TestValidateKeeper(t *testing.T) {
|
||||
objectMeta := metav1.ObjectMeta{Name: "test", Namespace: "test"}
|
||||
validator := ProvideKeeperValidator(featuremgmt.WithFeatures(featuremgmt.FlagSecretsManagementAppPlatformAwsKeeper))
|
||||
validator := ProvideKeeperValidator()
|
||||
|
||||
t.Run("when creating a new keeper", func(t *testing.T) {
|
||||
t.Run("the `description` must be present", func(t *testing.T) {
|
||||
@@ -23,12 +22,9 @@ func TestValidateKeeper(t *testing.T) {
|
||||
ObjectMeta: objectMeta,
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{
|
||||
Region: "us-east-1",
|
||||
AccessKey: &secretv1beta1.KeeperAWSAccessKey{
|
||||
AccessKeyID: secretv1beta1.KeeperCredentialValue{ValueFromEnv: "some-value"},
|
||||
SecretAccessKey: secretv1beta1.KeeperCredentialValue{ValueFromEnv: "some-value"},
|
||||
},
|
||||
KmsKeyID: ptr.To("kms-key-id"),
|
||||
AccessKeyID: secretv1beta1.KeeperCredentialValue{ValueFromEnv: "some-value"},
|
||||
SecretAccessKey: secretv1beta1.KeeperCredentialValue{ValueFromEnv: "some-value"},
|
||||
KmsKeyID: ptr.To("kms-key-id"),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -45,42 +41,30 @@ func TestValidateKeeper(t *testing.T) {
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Description: "description",
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{
|
||||
Region: "us-east-1",
|
||||
AccessKey: &secretv1beta1.KeeperAWSAccessKey{
|
||||
AccessKeyID: secretv1beta1.KeeperCredentialValue{
|
||||
ValueFromEnv: "some-value",
|
||||
},
|
||||
SecretAccessKey: secretv1beta1.KeeperCredentialValue{
|
||||
SecureValueName: "some-value",
|
||||
},
|
||||
AccessKeyID: secretv1beta1.KeeperCredentialValue{
|
||||
ValueFromEnv: "some-value",
|
||||
},
|
||||
SecretAccessKey: secretv1beta1.KeeperCredentialValue{
|
||||
SecureValueName: "some-value",
|
||||
},
|
||||
KmsKeyID: ptr.To("optional"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("aws keeper feature flag must be enabled", func(t *testing.T) {
|
||||
// Validator with feature disabled
|
||||
validator := ProvideKeeperValidator(featuremgmt.WithFeatures())
|
||||
errs := validator.Validate(validKeeperAWS.DeepCopy(), nil, admission.Create)
|
||||
require.Len(t, errs, 1)
|
||||
require.Equal(t, "spec.aws", errs[0].Field)
|
||||
require.Contains(t, errs[0].Detail, "secretsManagementAppPlatformAwsKeeper")
|
||||
})
|
||||
|
||||
t.Run("`accessKeyID` must be present", func(t *testing.T) {
|
||||
t.Run("at least one of the credential value must be present", func(t *testing.T) {
|
||||
keeper := validKeeperAWS.DeepCopy()
|
||||
keeper.Spec.Aws.AccessKey.AccessKeyID = secretv1beta1.KeeperCredentialValue{}
|
||||
keeper.Spec.Aws.AccessKeyID = secretv1beta1.KeeperCredentialValue{}
|
||||
|
||||
errs := validator.Validate(keeper, nil, admission.Create)
|
||||
require.Len(t, errs, 1)
|
||||
require.Equal(t, "spec.aws.accessKey.accessKeyID", errs[0].Field)
|
||||
require.Equal(t, "spec.aws.accessKeyID", errs[0].Field)
|
||||
})
|
||||
|
||||
t.Run("at most one of the credential value must be present", func(t *testing.T) {
|
||||
keeper := validKeeperAWS.DeepCopy()
|
||||
keeper.Spec.Aws.AccessKey.AccessKeyID = secretv1beta1.KeeperCredentialValue{
|
||||
keeper.Spec.Aws.AccessKeyID = secretv1beta1.KeeperCredentialValue{
|
||||
SecureValueName: "a",
|
||||
ValueFromEnv: "b",
|
||||
ValueFromConfig: "c",
|
||||
@@ -88,23 +72,23 @@ func TestValidateKeeper(t *testing.T) {
|
||||
|
||||
errs := validator.Validate(keeper, nil, admission.Create)
|
||||
require.Len(t, errs, 1)
|
||||
require.Equal(t, "spec.aws.accessKey.accessKeyID", errs[0].Field)
|
||||
require.Equal(t, "spec.aws.accessKeyID", errs[0].Field)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("`secretAccessKey` must be present", func(t *testing.T) {
|
||||
t.Run("at least one of the credential value must be present", func(t *testing.T) {
|
||||
keeper := validKeeperAWS.DeepCopy()
|
||||
keeper.Spec.Aws.AccessKey.SecretAccessKey = secretv1beta1.KeeperCredentialValue{}
|
||||
keeper.Spec.Aws.SecretAccessKey = secretv1beta1.KeeperCredentialValue{}
|
||||
|
||||
errs := validator.Validate(keeper, nil, admission.Create)
|
||||
require.Len(t, errs, 1)
|
||||
require.Equal(t, "spec.aws.accessKey.secretAccessKey", errs[0].Field)
|
||||
require.Equal(t, "spec.aws.secretAccessKey", errs[0].Field)
|
||||
})
|
||||
|
||||
t.Run("at most one of the credential value must be present", func(t *testing.T) {
|
||||
keeper := validKeeperAWS.DeepCopy()
|
||||
keeper.Spec.Aws.AccessKey.SecretAccessKey = secretv1beta1.KeeperCredentialValue{
|
||||
keeper.Spec.Aws.SecretAccessKey = secretv1beta1.KeeperCredentialValue{
|
||||
SecureValueName: "a",
|
||||
ValueFromEnv: "b",
|
||||
ValueFromConfig: "c",
|
||||
@@ -112,23 +96,7 @@ func TestValidateKeeper(t *testing.T) {
|
||||
|
||||
errs := validator.Validate(keeper, nil, admission.Create)
|
||||
require.Len(t, errs, 1)
|
||||
require.Equal(t, "spec.aws.accessKey.secretAccessKey", errs[0].Field)
|
||||
})
|
||||
|
||||
t.Run("only one of accessKey or assumeRole can be present", func(t *testing.T) {
|
||||
keeper := validKeeperAWS.DeepCopy()
|
||||
keeper.Spec.Aws.AccessKey.SecretAccessKey = secretv1beta1.KeeperCredentialValue{
|
||||
SecureValueName: "a",
|
||||
}
|
||||
keeper.Spec.Aws.AssumeRole = &secretv1beta1.KeeperAWSAssumeRole{
|
||||
AssumeRoleArn: "arn",
|
||||
ExternalID: "id",
|
||||
}
|
||||
|
||||
errs := validator.Validate(keeper, nil, admission.Create)
|
||||
require.Len(t, errs, 1)
|
||||
require.Equal(t, "spec.aws", errs[0].Field)
|
||||
require.Equal(t, "only one of `accessKey` or `assumeRole` can be present", errs[0].Detail)
|
||||
require.Equal(t, "spec.aws.secretAccessKey", errs[0].Field)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
package featuremgmt
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
ofrep "github.com/open-feature/go-sdk-contrib/providers/ofrep"
|
||||
"github.com/open-feature/go-sdk/openfeature"
|
||||
)
|
||||
|
||||
func newOFREPProvider(url string, client *http.Client) (openfeature.FeatureProvider, error) {
|
||||
options := []ofrep.Option{}
|
||||
if client != nil {
|
||||
options = append(options, ofrep.WithClient(client))
|
||||
}
|
||||
|
||||
return ofrep.NewProvider(url, options...), nil
|
||||
}
|
||||
@@ -19,11 +19,11 @@ const (
|
||||
|
||||
// OpenFeatureConfig holds configuration for initializing OpenFeature
|
||||
type OpenFeatureConfig struct {
|
||||
// ProviderType is either "static", "goff", or "ofrep"
|
||||
// ProviderType is either "static" or "goff"
|
||||
ProviderType string
|
||||
// URL is the GOFF or OFREP service URL (required for GOFF + OFREP providers)
|
||||
// URL is the GOFF service URL (required for GOFF provider)
|
||||
URL *url.URL
|
||||
// HTTPClient is a pre-configured HTTP client (optional, used for GOFF + OFREP providers)
|
||||
// HTTPClient is a pre-configured HTTP client (optional, used for GOFF provider)
|
||||
HTTPClient *http.Client
|
||||
// StaticFlags are the feature flags to use with static provider
|
||||
StaticFlags map[string]bool
|
||||
@@ -35,9 +35,9 @@ type OpenFeatureConfig struct {
|
||||
|
||||
// InitOpenFeature initializes OpenFeature with the provided configuration
|
||||
func InitOpenFeature(config OpenFeatureConfig) error {
|
||||
// For GOFF + OFREP providers, ensure we have a URL
|
||||
if (config.ProviderType == setting.GOFFProviderType || config.ProviderType == setting.OFREPProviderType) && (config.URL == nil || config.URL.String() == "") {
|
||||
return fmt.Errorf("URL is required for GOFF + OFREP providers")
|
||||
// For GOFF provider, ensure we have a URL
|
||||
if config.ProviderType == setting.GOFFProviderType && (config.URL == nil || config.URL.String() == "") {
|
||||
return fmt.Errorf("URL is required for GOFF provider")
|
||||
}
|
||||
|
||||
p, err := createProvider(config.ProviderType, config.URL, config.StaticFlags, config.HTTPClient)
|
||||
@@ -66,17 +66,13 @@ func InitOpenFeatureWithCfg(cfg *setting.Cfg) error {
|
||||
}
|
||||
|
||||
var httpcli *http.Client
|
||||
if cfg.OpenFeature.ProviderType == setting.GOFFProviderType || cfg.OpenFeature.ProviderType == setting.OFREPProviderType {
|
||||
var m *clientauthmiddleware.TokenExchangeMiddleware
|
||||
|
||||
if cfg.OpenFeature.ProviderType == setting.GOFFProviderType {
|
||||
m, err = clientauthmiddleware.NewTokenExchangeMiddleware(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create token exchange middleware: %w", err)
|
||||
}
|
||||
if cfg.OpenFeature.ProviderType == setting.GOFFProviderType {
|
||||
m, err := clientauthmiddleware.NewTokenExchangeMiddleware(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create token exchange middleware: %w", err)
|
||||
}
|
||||
|
||||
httpcli, err = createHTTPClient(m)
|
||||
httpcli, err = goffHTTPClient(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -103,35 +99,28 @@ func createProvider(
|
||||
staticFlags map[string]bool,
|
||||
httpClient *http.Client,
|
||||
) (openfeature.FeatureProvider, error) {
|
||||
if providerType == setting.GOFFProviderType || providerType == setting.OFREPProviderType {
|
||||
if u == nil || u.String() == "" {
|
||||
return nil, fmt.Errorf("feature provider url is required for GOFFProviderType + OFREPProviderType")
|
||||
}
|
||||
|
||||
if providerType == setting.GOFFProviderType {
|
||||
return newGOFFProvider(u.String(), httpClient)
|
||||
}
|
||||
|
||||
if providerType == setting.OFREPProviderType {
|
||||
return newOFREPProvider(u.String(), httpClient)
|
||||
}
|
||||
if providerType != setting.GOFFProviderType {
|
||||
return newStaticProvider(staticFlags)
|
||||
}
|
||||
|
||||
return newStaticProvider(staticFlags)
|
||||
if u == nil || u.String() == "" {
|
||||
return nil, fmt.Errorf("feature provider url is required for GOFFProviderType")
|
||||
}
|
||||
|
||||
return newGOFFProvider(u.String(), httpClient)
|
||||
}
|
||||
|
||||
func createHTTPClient(m *clientauthmiddleware.TokenExchangeMiddleware) (*http.Client, error) {
|
||||
options := sdkhttpclient.Options{
|
||||
func goffHTTPClient(m *clientauthmiddleware.TokenExchangeMiddleware) (*http.Client, error) {
|
||||
httpcli, err := sdkhttpclient.NewProvider().New(sdkhttpclient.Options{
|
||||
TLS: &sdkhttpclient.TLSOptions{InsecureSkipVerify: true},
|
||||
Timeouts: &sdkhttpclient.TimeoutOptions{
|
||||
Timeout: 10 * time.Second,
|
||||
},
|
||||
}
|
||||
if m != nil {
|
||||
options.Middlewares = append(options.Middlewares, m.New([]string{featuresProviderAudience}))
|
||||
}
|
||||
Middlewares: []sdkhttpclient.Middleware{
|
||||
m.New([]string{featuresProviderAudience}),
|
||||
},
|
||||
})
|
||||
|
||||
httpcli, err := sdkhttpclient.NewProvider().New(options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create http client for openfeature: %w", err)
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"testing"
|
||||
|
||||
gofeatureflag "github.com/open-feature/go-sdk-contrib/providers/go-feature-flag/pkg"
|
||||
ofrep "github.com/open-feature/go-sdk-contrib/providers/ofrep"
|
||||
"github.com/open-feature/go-sdk/openfeature"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
@@ -61,15 +60,6 @@ func TestCreateProvider(t *testing.T) {
|
||||
expectedProvider: setting.GOFFProviderType,
|
||||
failSigning: true,
|
||||
},
|
||||
{
|
||||
name: "ofrep provider",
|
||||
cfg: setting.OpenFeatureSettings{
|
||||
ProviderType: setting.OFREPProviderType,
|
||||
URL: u,
|
||||
TargetingKey: "grafana",
|
||||
},
|
||||
expectedProvider: setting.OFREPProviderType,
|
||||
},
|
||||
{
|
||||
name: "invalid provider",
|
||||
cfg: setting.OpenFeatureSettings{
|
||||
@@ -106,24 +96,20 @@ func TestCreateProvider(t *testing.T) {
|
||||
}
|
||||
|
||||
tokenExchangeMiddleware := middleware.TestingTokenExchangeMiddleware(tokenExchangeClient)
|
||||
httpClient, err := createHTTPClient(tokenExchangeMiddleware)
|
||||
goffClient, err := goffHTTPClient(tokenExchangeMiddleware)
|
||||
require.NoError(t, err, "failed to create goff http client")
|
||||
provider, err := createProvider(tc.cfg.ProviderType, tc.cfg.URL, nil, httpClient)
|
||||
provider, err := createProvider(tc.cfg.ProviderType, tc.cfg.URL, nil, goffClient)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = openfeature.SetProviderAndWait(provider)
|
||||
require.NoError(t, err, "failed to set provider")
|
||||
|
||||
switch tc.expectedProvider {
|
||||
case setting.GOFFProviderType:
|
||||
if tc.expectedProvider == setting.GOFFProviderType {
|
||||
_, ok := provider.(*gofeatureflag.Provider)
|
||||
assert.True(t, ok, "expected provider to be of type goff.Provider")
|
||||
|
||||
testGoFFProvider(t, tc.failSigning)
|
||||
case setting.OFREPProviderType:
|
||||
_, ok := provider.(*ofrep.Provider)
|
||||
assert.True(t, ok, "expected provider to be of type ofrep.Provider")
|
||||
default:
|
||||
} else {
|
||||
_, ok := provider.(*inMemoryBulkProvider)
|
||||
assert.True(t, ok, "expected provider to be of type memprovider.InMemoryProvider")
|
||||
}
|
||||
|
||||
@@ -2082,14 +2082,6 @@ var (
|
||||
FrontendOnly: true,
|
||||
Owner: grafanaDataProSquad,
|
||||
},
|
||||
{
|
||||
Name: "secretsManagementAppPlatformAwsKeeper",
|
||||
Description: "Enables the creation of keepers that manage secrets stored on AWS secrets manager",
|
||||
Stage: FeatureStageExperimental,
|
||||
HideFromDocs: true,
|
||||
FrontendOnly: false,
|
||||
Owner: grafanaOperatorExperienceSquad,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
1
pkg/services/featuremgmt/toggles_gen.csv
generated
1
pkg/services/featuremgmt/toggles_gen.csv
generated
@@ -282,4 +282,3 @@ kubernetesAlertingHistorian,experimental,@grafana/alerting-squad,false,true,fals
|
||||
useMTPlugins,experimental,@grafana/plugins-platform-backend,false,false,true
|
||||
multiPropsVariables,experimental,@grafana/dashboards-squad,false,false,true
|
||||
smoothingTransformation,experimental,@grafana/datapro,false,false,true
|
||||
secretsManagementAppPlatformAwsKeeper,experimental,@grafana/grafana-operator-experience-squad,false,false,false
|
||||
|
||||
|
4
pkg/services/featuremgmt/toggles_gen.go
generated
4
pkg/services/featuremgmt/toggles_gen.go
generated
@@ -781,8 +781,4 @@ const (
|
||||
// FlagKubernetesAlertingHistorian
|
||||
// Adds support for Kubernetes alerting historian APIs
|
||||
FlagKubernetesAlertingHistorian = "kubernetesAlertingHistorian"
|
||||
|
||||
// FlagSecretsManagementAppPlatformAwsKeeper
|
||||
// Enables the creation of keepers that manage secrets stored on AWS secrets manager
|
||||
FlagSecretsManagementAppPlatformAwsKeeper = "secretsManagementAppPlatformAwsKeeper"
|
||||
)
|
||||
|
||||
16
pkg/services/featuremgmt/toggles_gen.json
generated
16
pkg/services/featuremgmt/toggles_gen.json
generated
@@ -3254,22 +3254,6 @@
|
||||
"codeowner": "@grafana/grafana-operator-experience-squad"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "secretsManagementAppPlatformAwsKeeper",
|
||||
"resourceVersion": "1767706420889",
|
||||
"creationTimestamp": "2026-01-06T12:55:50Z",
|
||||
"annotations": {
|
||||
"grafana.app/updatedTimestamp": "2026-01-06 13:33:40.889447 +0000 UTC"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"description": "Enables the creation of keepers that manage secrets stored on AWS secrets manager",
|
||||
"stage": "experimental",
|
||||
"codeowner": "@grafana/grafana-operator-experience-squad",
|
||||
"hideFromDocs": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "secretsManagementAppPlatformUI",
|
||||
|
||||
@@ -647,6 +647,12 @@
|
||||
},
|
||||
"BacktestConfig": {
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"condition": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -656,16 +662,8 @@
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"exec_err_state": {
|
||||
"enum": [
|
||||
"OK",
|
||||
"Alerting",
|
||||
"Error"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"for": {
|
||||
"type": "string"
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"from": {
|
||||
"format": "date-time",
|
||||
@@ -674,22 +672,12 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"keep_firing_for": {
|
||||
"type": "string"
|
||||
},
|
||||
"labels": {
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"missing_series_evals_to_resolve": {
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"namespace_uid": {
|
||||
"type": "string"
|
||||
},
|
||||
"no_data_state": {
|
||||
"enum": [
|
||||
"Alerting",
|
||||
@@ -698,18 +686,12 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"rule_group": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"type": "string"
|
||||
},
|
||||
"to": {
|
||||
"format": "date-time",
|
||||
"type": "string"
|
||||
},
|
||||
"uid": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1831,12 +1813,6 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"labels": {
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"limit": {
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
@@ -1847,12 +1823,6 @@
|
||||
"query_offset": {
|
||||
"type": "string"
|
||||
},
|
||||
"remote_write": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/RemoteWriteConfig"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"rules": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/GettableExtendedRuleNode"
|
||||
@@ -3172,12 +3142,6 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"labels": {
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"limit": {
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
@@ -3188,12 +3152,6 @@
|
||||
"query_offset": {
|
||||
"type": "string"
|
||||
},
|
||||
"remote_write": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/RemoteWriteConfig"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"rules": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/PostableExtendedRuleNode"
|
||||
@@ -3859,14 +3817,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RemoteWriteConfig": {
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ResponseDetails": {
|
||||
"properties": {
|
||||
"msg": {
|
||||
@@ -4143,12 +4093,6 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"labels": {
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"limit": {
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
@@ -4159,12 +4103,6 @@
|
||||
"query_offset": {
|
||||
"type": "string"
|
||||
},
|
||||
"remote_write": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/RemoteWriteConfig"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"rules": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/GettableExtendedRuleNode"
|
||||
|
||||
@@ -284,20 +284,11 @@ type PostableRuleGroupConfig struct {
|
||||
|
||||
// fields below are used by Mimir/Loki rulers
|
||||
|
||||
SourceTenants []string `yaml:"source_tenants,omitempty" json:"source_tenants,omitempty"`
|
||||
EvaluationDelay *model.Duration `yaml:"evaluation_delay,omitempty" json:"evaluation_delay,omitempty"`
|
||||
QueryOffset *model.Duration `yaml:"query_offset,omitempty" json:"query_offset,omitempty"`
|
||||
AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty" json:"align_evaluation_time_on_interval,omitempty"`
|
||||
Limit int `yaml:"limit,omitempty" json:"limit,omitempty"`
|
||||
Labels map[string]string `yaml:"labels,omitempty" json:"labels,omitempty"`
|
||||
|
||||
// GEM Ruler.
|
||||
|
||||
RWConfigs []RemoteWriteConfig `yaml:"remote_write,omitempty" json:"remote_write,omitempty"`
|
||||
}
|
||||
|
||||
type RemoteWriteConfig struct {
|
||||
URL string `yaml:"url,omitempty" json:"url,omitempty"`
|
||||
SourceTenants []string `yaml:"source_tenants,omitempty" json:"source_tenants,omitempty"`
|
||||
EvaluationDelay *model.Duration `yaml:"evaluation_delay,omitempty" json:"evaluation_delay,omitempty"`
|
||||
QueryOffset *model.Duration `yaml:"query_offset,omitempty" json:"query_offset,omitempty"`
|
||||
AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty" json:"align_evaluation_time_on_interval,omitempty"`
|
||||
Limit int `yaml:"limit,omitempty" json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
func (c *PostableRuleGroupConfig) UnmarshalJSON(b []byte) error {
|
||||
@@ -337,8 +328,8 @@ func (c *PostableRuleGroupConfig) validate() error {
|
||||
return fmt.Errorf("cannot mix Grafana & Prometheus style rules")
|
||||
}
|
||||
|
||||
if hasGrafRules && (len(c.SourceTenants) > 0 || c.EvaluationDelay != nil || c.QueryOffset != nil || c.AlignEvaluationTimeOnInterval || c.Limit > 0 || len(c.Labels) > 0 || len(c.RWConfigs) > 0) {
|
||||
return fmt.Errorf("fields source_tenants, evaluation_delay, query_offset, align_evaluation_time_on_interval, limit, labels, and remote_write are not supported for Grafana rules")
|
||||
if hasGrafRules && (len(c.SourceTenants) > 0 || c.EvaluationDelay != nil || c.QueryOffset != nil || c.AlignEvaluationTimeOnInterval || c.Limit > 0) {
|
||||
return fmt.Errorf("fields source_tenants, evaluation_delay, query_offset, align_evaluation_time_on_interval and limit are not supported for Grafana rules")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -354,16 +345,11 @@ type GettableRuleGroupConfig struct {
|
||||
|
||||
// fields below are used by Mimir/Loki rulers
|
||||
|
||||
SourceTenants []string `yaml:"source_tenants,omitempty" json:"source_tenants,omitempty"`
|
||||
EvaluationDelay *model.Duration `yaml:"evaluation_delay,omitempty" json:"evaluation_delay,omitempty"`
|
||||
QueryOffset *model.Duration `yaml:"query_offset,omitempty" json:"query_offset,omitempty"`
|
||||
AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty" json:"align_evaluation_time_on_interval,omitempty"`
|
||||
Limit int `yaml:"limit,omitempty" json:"limit,omitempty"`
|
||||
Labels map[string]string `yaml:"labels,omitempty" json:"labels,omitempty"`
|
||||
|
||||
// GEM Ruler.
|
||||
|
||||
RWConfigs []RemoteWriteConfig `yaml:"remote_write,omitempty" json:"remote_write,omitempty"`
|
||||
SourceTenants []string `yaml:"source_tenants,omitempty" json:"source_tenants,omitempty"`
|
||||
EvaluationDelay *model.Duration `yaml:"evaluation_delay,omitempty" json:"evaluation_delay,omitempty"`
|
||||
QueryOffset *model.Duration `yaml:"query_offset,omitempty" json:"query_offset,omitempty"`
|
||||
AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty" json:"align_evaluation_time_on_interval,omitempty"`
|
||||
Limit int `yaml:"limit,omitempty" json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
func (c *GettableRuleGroupConfig) UnmarshalJSON(b []byte) error {
|
||||
|
||||
@@ -647,6 +647,12 @@
|
||||
},
|
||||
"BacktestConfig": {
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"condition": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -656,16 +662,8 @@
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"exec_err_state": {
|
||||
"enum": [
|
||||
"OK",
|
||||
"Alerting",
|
||||
"Error"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"for": {
|
||||
"type": "string"
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"from": {
|
||||
"format": "date-time",
|
||||
@@ -674,22 +672,12 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"keep_firing_for": {
|
||||
"type": "string"
|
||||
},
|
||||
"labels": {
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"missing_series_evals_to_resolve": {
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
},
|
||||
"namespace_uid": {
|
||||
"type": "string"
|
||||
},
|
||||
"no_data_state": {
|
||||
"enum": [
|
||||
"Alerting",
|
||||
@@ -698,18 +686,12 @@
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"rule_group": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"type": "string"
|
||||
},
|
||||
"to": {
|
||||
"format": "date-time",
|
||||
"type": "string"
|
||||
},
|
||||
"uid": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1831,12 +1813,6 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"labels": {
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"limit": {
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
@@ -1847,12 +1823,6 @@
|
||||
"query_offset": {
|
||||
"type": "string"
|
||||
},
|
||||
"remote_write": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/RemoteWriteConfig"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"rules": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/GettableExtendedRuleNode"
|
||||
@@ -3172,12 +3142,6 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"labels": {
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"limit": {
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
@@ -3188,12 +3152,6 @@
|
||||
"query_offset": {
|
||||
"type": "string"
|
||||
},
|
||||
"remote_write": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/RemoteWriteConfig"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"rules": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/PostableExtendedRuleNode"
|
||||
@@ -3859,14 +3817,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"RemoteWriteConfig": {
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ResponseDetails": {
|
||||
"properties": {
|
||||
"msg": {
|
||||
@@ -4143,12 +4093,6 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"labels": {
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"limit": {
|
||||
"format": "int64",
|
||||
"type": "integer"
|
||||
@@ -4159,12 +4103,6 @@
|
||||
"query_offset": {
|
||||
"type": "string"
|
||||
},
|
||||
"remote_write": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/RemoteWriteConfig"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"rules": {
|
||||
"items": {
|
||||
"$ref": "#/definitions/GettableExtendedRuleNode"
|
||||
|
||||
@@ -5072,6 +5072,12 @@
|
||||
"BacktestConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"condition": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -5081,16 +5087,8 @@
|
||||
"$ref": "#/definitions/AlertQuery"
|
||||
}
|
||||
},
|
||||
"exec_err_state": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"OK",
|
||||
"Alerting",
|
||||
"Error"
|
||||
]
|
||||
},
|
||||
"for": {
|
||||
"type": "string"
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"from": {
|
||||
"type": "string",
|
||||
@@ -5099,22 +5097,12 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"keep_firing_for": {
|
||||
"type": "string"
|
||||
},
|
||||
"labels": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"missing_series_evals_to_resolve": {
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"namespace_uid": {
|
||||
"type": "string"
|
||||
},
|
||||
"no_data_state": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
@@ -5123,18 +5111,12 @@
|
||||
"OK"
|
||||
]
|
||||
},
|
||||
"rule_group": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"type": "string"
|
||||
},
|
||||
"to": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"uid": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -6257,12 +6239,6 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"labels": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"limit": {
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
@@ -6273,12 +6249,6 @@
|
||||
"query_offset": {
|
||||
"type": "string"
|
||||
},
|
||||
"remote_write": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/RemoteWriteConfig"
|
||||
}
|
||||
},
|
||||
"rules": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -7599,12 +7569,6 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"labels": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"limit": {
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
@@ -7615,12 +7579,6 @@
|
||||
"query_offset": {
|
||||
"type": "string"
|
||||
},
|
||||
"remote_write": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/RemoteWriteConfig"
|
||||
}
|
||||
},
|
||||
"rules": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -8285,14 +8243,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"RemoteWriteConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"ResponseDetails": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -8570,12 +8520,6 @@
|
||||
"interval": {
|
||||
"$ref": "#/definitions/Duration"
|
||||
},
|
||||
"labels": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"limit": {
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
@@ -8586,12 +8530,6 @@
|
||||
"query_offset": {
|
||||
"type": "string"
|
||||
},
|
||||
"remote_write": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/RemoteWriteConfig"
|
||||
}
|
||||
},
|
||||
"rules": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
|
||||
@@ -73,21 +73,21 @@ func TestBuildLabelMatcherJSON(t *testing.T) {
|
||||
name: "MySQL MatchEqual with non-empty value",
|
||||
dialect: migrator.NewMysqlDialect(),
|
||||
matcher: &labels.Matcher{Type: labels.MatchEqual, Name: "team", Value: "alerting"},
|
||||
wantSQL: `JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"'))) = ?`,
|
||||
wantSQL: "JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?))) = ?",
|
||||
wantArgs: []any{"team", "alerting"},
|
||||
},
|
||||
{
|
||||
name: "MySQL MatchEqual with empty value",
|
||||
dialect: migrator.NewMysqlDialect(),
|
||||
matcher: &labels.Matcher{Type: labels.MatchEqual, Name: "team", Value: ""},
|
||||
wantSQL: `(JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"'))) = ? OR JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"')) IS NULL)`,
|
||||
wantSQL: "(JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?))) = ? OR JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?)) IS NULL)",
|
||||
wantArgs: []any{"team", "", "team"},
|
||||
},
|
||||
{
|
||||
name: "MySQL MatchNotEqual",
|
||||
dialect: migrator.NewMysqlDialect(),
|
||||
matcher: &labels.Matcher{Type: labels.MatchNotEqual, Name: "team", Value: "alerting"},
|
||||
wantSQL: `(JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"'))) IS NULL OR JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"'))) != ?)`,
|
||||
wantSQL: "(JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?))) IS NULL OR JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?))) != ?)",
|
||||
wantArgs: []any{"team", "team", "alerting"},
|
||||
},
|
||||
{
|
||||
@@ -149,7 +149,7 @@ func TestBuildLabelKeyExistsCondition(t *testing.T) {
|
||||
dialect: migrator.NewMysqlDialect(),
|
||||
column: "labels",
|
||||
key: "__grafana_origin",
|
||||
wantSQL: `JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"')) IS NOT NULL`,
|
||||
wantSQL: "JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?)) IS NOT NULL",
|
||||
wantArgs: []any{"__grafana_origin"},
|
||||
},
|
||||
{
|
||||
@@ -194,7 +194,7 @@ func TestBuildLabelKeyMissingCondition(t *testing.T) {
|
||||
dialect: migrator.NewMysqlDialect(),
|
||||
column: "labels",
|
||||
key: "__grafana_origin",
|
||||
wantSQL: `JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"')) IS NULL`,
|
||||
wantSQL: "JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?)) IS NULL",
|
||||
wantArgs: []any{"__grafana_origin"},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -2454,7 +2454,7 @@ func TestIntegration_ListAlertRules(t *testing.T) {
|
||||
ruleGen.WithLabels(map[string]string{"glob": "*[?]"}),
|
||||
ruleGen.WithTitle("rule_glob")))
|
||||
ruleSpecialChars := createRule(t, store, ruleGen.With(
|
||||
ruleGen.WithLabels(map[string]string{"label-with-hyphen": "line1\nline2\\end\"quote"}),
|
||||
ruleGen.WithLabels(map[string]string{"json": "line1\nline2\\end\"quote"}),
|
||||
ruleGen.WithTitle("rule_special_chars")))
|
||||
ruleEmpty := createRule(t, store, ruleGen.With(
|
||||
ruleGen.WithLabels(map[string]string{"empty": ""}),
|
||||
@@ -2531,7 +2531,7 @@ func TestIntegration_ListAlertRules(t *testing.T) {
|
||||
name: "JSON escape characters are handled correctly",
|
||||
labelMatchers: labels.Matchers{
|
||||
func() *labels.Matcher {
|
||||
m, _ := labels.NewMatcher(labels.MatchEqual, "label-with-hyphen", "line1\nline2\\end\"quote")
|
||||
m, _ := labels.NewMatcher(labels.MatchEqual, "json", "line1\nline2\\end\"quote")
|
||||
return m
|
||||
}(),
|
||||
},
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
func jsonEquals(dialect migrator.Dialect, column, key, value string) (string, []any) {
|
||||
switch dialect.DriverName() {
|
||||
case migrator.MySQL:
|
||||
return fmt.Sprintf(`JSON_UNQUOTE(JSON_EXTRACT(NULLIF(%s, ''), CONCAT('$."', ?, '"'))) = ?`, column), []any{key, value}
|
||||
return fmt.Sprintf("JSON_UNQUOTE(JSON_EXTRACT(NULLIF(%s, ''), CONCAT('$.', ?))) = ?", column), []any{key, value}
|
||||
case migrator.Postgres:
|
||||
return fmt.Sprintf("jsonb_extract_path_text(NULLIF(%s, '')::jsonb, ?) = ?", column), []any{key, value}
|
||||
default:
|
||||
@@ -25,7 +25,7 @@ func jsonNotEquals(dialect migrator.Dialect, column, key, value string) (string,
|
||||
var jx string
|
||||
switch dialect.DriverName() {
|
||||
case migrator.MySQL:
|
||||
jx = fmt.Sprintf(`JSON_UNQUOTE(JSON_EXTRACT(NULLIF(%s, ''), CONCAT('$."', ?, '"')))`, column)
|
||||
jx = fmt.Sprintf("JSON_UNQUOTE(JSON_EXTRACT(NULLIF(%s, ''), CONCAT('$.', ?)))", column)
|
||||
case migrator.Postgres:
|
||||
jx = fmt.Sprintf("jsonb_extract_path_text(NULLIF(%s, '')::jsonb, ?)", column)
|
||||
default:
|
||||
@@ -49,7 +49,7 @@ func jsonKeyCondition(dialect migrator.Dialect, column, key string, exists bool)
|
||||
}
|
||||
switch dialect.DriverName() {
|
||||
case migrator.MySQL:
|
||||
return fmt.Sprintf(`JSON_EXTRACT(NULLIF(%s, ''), CONCAT('$."', ?, '"')) %s`, column, nullCheck), []any{key}, nil
|
||||
return fmt.Sprintf("JSON_EXTRACT(NULLIF(%s, ''), CONCAT('$.', ?)) %s", column, nullCheck), []any{key}, nil
|
||||
case migrator.Postgres:
|
||||
return fmt.Sprintf("jsonb_extract_path_text(NULLIF(%s, '')::jsonb, ?) %s", column, nullCheck), []any{key}, nil
|
||||
default:
|
||||
|
||||
@@ -23,7 +23,7 @@ func TestJsonEquals(t *testing.T) {
|
||||
column: "labels",
|
||||
key: "team",
|
||||
value: "alerting",
|
||||
wantSQL: `JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"'))) = ?`,
|
||||
wantSQL: "JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?))) = ?",
|
||||
wantArgs: []any{"team", "alerting"},
|
||||
},
|
||||
{
|
||||
@@ -62,7 +62,7 @@ func TestJsonNotEquals(t *testing.T) {
|
||||
column: "labels",
|
||||
key: "team",
|
||||
value: "alerting",
|
||||
wantSQL: `(JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"'))) IS NULL OR JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"'))) != ?)`,
|
||||
wantSQL: "(JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?))) IS NULL OR JSON_UNQUOTE(JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?))) != ?)",
|
||||
wantArgs: []any{"team", "team", "alerting"},
|
||||
},
|
||||
{
|
||||
@@ -99,7 +99,7 @@ func TestJsonKeyMissing(t *testing.T) {
|
||||
dialect: migrator.NewMysqlDialect(),
|
||||
column: "labels",
|
||||
key: "team",
|
||||
wantSQL: `JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"')) IS NULL`,
|
||||
wantSQL: "JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?)) IS NULL",
|
||||
wantArgs: []any{"team"},
|
||||
},
|
||||
{
|
||||
@@ -136,7 +136,7 @@ func TestJsonKeyExists(t *testing.T) {
|
||||
dialect: migrator.NewMysqlDialect(),
|
||||
column: "labels",
|
||||
key: "__grafana_origin",
|
||||
wantSQL: `JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$."', ?, '"')) IS NOT NULL`,
|
||||
wantSQL: "JSON_EXTRACT(NULLIF(labels, ''), CONCAT('$.', ?)) IS NOT NULL",
|
||||
wantArgs: []any{"__grafana_origin"},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -55,7 +55,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/plugininstaller"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginsettings"
|
||||
pluginSettings "github.com/grafana/grafana/pkg/services/pluginsintegration/pluginsettings/service"
|
||||
_ "github.com/grafana/grafana/pkg/services/pluginsintegration/pluginslog" // Initialize plugin logger
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginsources"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginsso"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
package pluginslog
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
pluginslog "github.com/grafana/grafana/pkg/plugins/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Register Grafana's logger implementation for pkg/plugins
|
||||
pluginslog.SetLoggerFactory(func(name string) pluginslog.Logger {
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: log.New(name),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
type grafanaInfraLogWrapper struct {
|
||||
l *log.ConcreteLogger
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) New(ctx ...any) pluginslog.Logger {
|
||||
if len(ctx) == 0 {
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: d.l.New(),
|
||||
}
|
||||
}
|
||||
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: d.l.New(ctx...),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) Debug(msg string, ctx ...any) {
|
||||
d.l.Debug(msg, ctx...)
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) Info(msg string, ctx ...any) {
|
||||
d.l.Info(msg, ctx...)
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) Warn(msg string, ctx ...any) {
|
||||
d.l.Warn(msg, ctx...)
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) Error(msg string, ctx ...any) {
|
||||
d.l.Error(msg, ctx...)
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) FromContext(ctx context.Context) pluginslog.Logger {
|
||||
concreteInfraLogger, ok := d.l.FromContext(ctx).(*log.ConcreteLogger)
|
||||
if !ok {
|
||||
return d.New()
|
||||
}
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: concreteInfraLogger,
|
||||
}
|
||||
}
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
const (
|
||||
StaticProviderType = "static"
|
||||
GOFFProviderType = "goff"
|
||||
OFREPProviderType = "ofrep"
|
||||
)
|
||||
|
||||
type OpenFeatureSettings struct {
|
||||
@@ -34,7 +33,7 @@ func (cfg *Cfg) readOpenFeatureSettings() error {
|
||||
|
||||
cfg.OpenFeature.TargetingKey = config.Key("targetingKey").MustString(defaultTargetingKey)
|
||||
|
||||
if strURL != "" && (cfg.OpenFeature.ProviderType == GOFFProviderType || cfg.OpenFeature.ProviderType == OFREPProviderType) {
|
||||
if strURL != "" && cfg.OpenFeature.ProviderType == GOFFProviderType {
|
||||
u, err := url.Parse(strURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid feature provider url: %w", err)
|
||||
|
||||
@@ -40,10 +40,6 @@ type SecretsManagerSettings struct {
|
||||
RunSecretsDBMigrations bool
|
||||
// Whether to run the data key id migration. Requires that RunSecretsDBMigrations is also true.
|
||||
RunDataKeyMigration bool
|
||||
|
||||
// AWS Keeper
|
||||
AWSKeeperAccessKeyID string
|
||||
AWSKeeperSecretAccessKey string
|
||||
}
|
||||
|
||||
func (cfg *Cfg) readSecretsManagerSettings() {
|
||||
@@ -67,9 +63,6 @@ func (cfg *Cfg) readSecretsManagerSettings() {
|
||||
cfg.SecretsManagement.RunSecretsDBMigrations = secretsMgmt.Key("run_secrets_db_migrations").MustBool(true)
|
||||
cfg.SecretsManagement.RunDataKeyMigration = secretsMgmt.Key("run_data_key_migration").MustBool(true)
|
||||
|
||||
cfg.SecretsManagement.AWSKeeperAccessKeyID = secretsMgmt.Key("aws_access_key_id").MustString("")
|
||||
cfg.SecretsManagement.AWSKeeperSecretAccessKey = secretsMgmt.Key("aws_secret_access_key").MustString("")
|
||||
|
||||
// Extract available KMS providers from configuration sections
|
||||
providers := make(map[string]map[string]string)
|
||||
for _, section := range cfg.Raw.Sections() {
|
||||
|
||||
@@ -145,14 +145,6 @@ func (s *decryptStorage) Decrypt(ctx context.Context, namespace xkube.Namespace,
|
||||
return "", fmt.Errorf("failed to get keeper for config: %v (%w)", err, contracts.ErrDecryptFailed)
|
||||
}
|
||||
|
||||
if sv.Spec.Ref != nil {
|
||||
exposedValue, err := keeper.RetrieveReference(ctx, keeperConfig, *sv.Spec.Ref)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to expose secret using reference: %v (%w)", err, contracts.ErrDecryptFailed)
|
||||
}
|
||||
return exposedValue, nil
|
||||
}
|
||||
|
||||
exposedValue, err := keeper.Expose(ctx, keeperConfig, namespace, name, sv.Status.Version)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to expose secret: %v (%w)", err, contracts.ErrDecryptFailed)
|
||||
|
||||
@@ -9,14 +9,12 @@ import (
|
||||
"github.com/grafana/grafana-app-sdk/logging"
|
||||
"github.com/stretchr/testify/require"
|
||||
grpcmetadata "google.golang.org/grpc/metadata"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
secretv1beta1 "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/identity"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/contracts"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/testutils"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/xkube"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
)
|
||||
|
||||
@@ -326,66 +324,6 @@ func TestIntegrationDecrypt(t *testing.T) {
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("happy path, referencing a secret in a 3rd party store", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
||||
tokenSvcIdentity := "svc"
|
||||
stSvcIdentity := "st-svc"
|
||||
|
||||
// Create auth context with proper permissions that match the decrypters
|
||||
authCtx := createAuthContext(ctx, "default", []string{"secret.grafana.app/securevalues:decrypt"}, tokenSvcIdentity, types.TypeUser)
|
||||
|
||||
// Needs to be incoming because we are pretending we received the metadata from a gRPC request
|
||||
ctx = grpcmetadata.NewIncomingContext(authCtx, grpcmetadata.New(map[string]string{
|
||||
contracts.HeaderGrafanaServiceIdentityName: stSvcIdentity,
|
||||
}))
|
||||
|
||||
// Setup service
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
// Create a secret on the 3rd party secret store
|
||||
sut.ModelSecretsManager.Create("ref1", "value")
|
||||
|
||||
// Create a 3rd party keeper
|
||||
keeper, err := sut.KeeperMetadataStorage.Create(t.Context(), &secretv1beta1.Keeper{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "k1",
|
||||
},
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{},
|
||||
},
|
||||
}, "actor-uid")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(keeper.Namespace), keeper.Name))
|
||||
|
||||
// Create a secure value
|
||||
sv := &secretv1beta1.SecureValue{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "sv-test",
|
||||
},
|
||||
Spec: secretv1beta1.SecureValueSpec{
|
||||
Description: "description",
|
||||
Decrypters: []string{tokenSvcIdentity},
|
||||
Ref: ptr.To("ref1"),
|
||||
}}
|
||||
|
||||
_, err = sut.CreateSv(ctx, testutils.CreateSvWithSv(sv))
|
||||
require.NoError(t, err)
|
||||
|
||||
fakeLogger := &mockLogger{}
|
||||
|
||||
loggerCtx := logging.Context(ctx, fakeLogger)
|
||||
|
||||
exposed, err := sut.DecryptStorage.Decrypt(loggerCtx, "default", "sv-test")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "value", exposed.DangerouslyExposeAndConsumeValue())
|
||||
})
|
||||
}
|
||||
|
||||
func createAuthContext(ctx context.Context, namespace string, permissions []string, svc string, identityType types.IdentityType) context.Context {
|
||||
|
||||
@@ -59,16 +59,16 @@ func (kp *keeperDB) toKubernetes() (*secretv1beta1.Keeper, error) {
|
||||
}
|
||||
|
||||
// Obtain provider configs
|
||||
provider := parseKeeperConfigJson(kp.Name, secretv1beta1.KeeperType(kp.Type), kp.Payload)
|
||||
provider := toProvider(secretv1beta1.KeeperType(kp.Type), kp.Payload)
|
||||
switch v := provider.(type) {
|
||||
case *secretv1beta1.NamedKeeperConfig[*secretv1beta1.KeeperAWSConfig]:
|
||||
resource.Spec.Aws = v.Cfg
|
||||
case *secretv1beta1.NamedKeeperConfig[*secretv1beta1.KeeperAzureConfig]:
|
||||
resource.Spec.Azure = v.Cfg
|
||||
case *secretv1beta1.NamedKeeperConfig[*secretv1beta1.KeeperGCPConfig]:
|
||||
resource.Spec.Gcp = v.Cfg
|
||||
case *secretv1beta1.NamedKeeperConfig[*secretv1beta1.KeeperHashiCorpConfig]:
|
||||
resource.Spec.HashiCorpVault = v.Cfg
|
||||
case *secretv1beta1.KeeperAWSConfig:
|
||||
resource.Spec.Aws = v
|
||||
case *secretv1beta1.KeeperAzureConfig:
|
||||
resource.Spec.Azure = v
|
||||
case *secretv1beta1.KeeperGCPConfig:
|
||||
resource.Spec.Gcp = v
|
||||
case *secretv1beta1.KeeperHashiCorpConfig:
|
||||
resource.Spec.HashiCorpVault = v
|
||||
}
|
||||
|
||||
// Set all meta fields here for consistency.
|
||||
@@ -214,34 +214,34 @@ func toTypeAndPayload(kp *secretv1beta1.Keeper) (secretv1beta1.KeeperType, strin
|
||||
return "", "", fmt.Errorf("no keeper type found")
|
||||
}
|
||||
|
||||
// parseKeeperConfigJson maps a KeeperType and payload into a provider config struct.
|
||||
// toProvider maps a KeeperType and payload into a provider config struct.
|
||||
// TODO: Move as method of KeeperType
|
||||
func parseKeeperConfigJson(keeperName string, keeperType secretv1beta1.KeeperType, payload string) secretv1beta1.KeeperConfig {
|
||||
func toProvider(keeperType secretv1beta1.KeeperType, payload string) secretv1beta1.KeeperConfig {
|
||||
switch keeperType {
|
||||
case secretv1beta1.AWSKeeperType:
|
||||
aws := &secretv1beta1.KeeperAWSConfig{}
|
||||
if err := json.Unmarshal([]byte(payload), aws); err != nil {
|
||||
return nil
|
||||
}
|
||||
return secretv1beta1.NewNamedKeeperConfig(keeperName, aws)
|
||||
return aws
|
||||
case secretv1beta1.AzureKeeperType:
|
||||
azure := &secretv1beta1.KeeperAzureConfig{}
|
||||
if err := json.Unmarshal([]byte(payload), azure); err != nil {
|
||||
return nil
|
||||
}
|
||||
return secretv1beta1.NewNamedKeeperConfig(keeperName, azure)
|
||||
return azure
|
||||
case secretv1beta1.GCPKeeperType:
|
||||
gcp := &secretv1beta1.KeeperGCPConfig{}
|
||||
if err := json.Unmarshal([]byte(payload), gcp); err != nil {
|
||||
return nil
|
||||
}
|
||||
return secretv1beta1.NewNamedKeeperConfig(keeperName, gcp)
|
||||
return gcp
|
||||
case secretv1beta1.HashiCorpKeeperType:
|
||||
hashicorp := &secretv1beta1.KeeperHashiCorpConfig{}
|
||||
if err := json.Unmarshal([]byte(payload), hashicorp); err != nil {
|
||||
return nil
|
||||
}
|
||||
return secretv1beta1.NewNamedKeeperConfig(keeperName, hashicorp)
|
||||
return hashicorp
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -253,16 +253,12 @@ func extractSecureValues(kp *secretv1beta1.Keeper) map[string]struct{} {
|
||||
case kp.Spec.Aws != nil:
|
||||
secureValues := make(map[string]struct{}, 0)
|
||||
|
||||
if kp.Spec.Aws.AccessKey == nil {
|
||||
return secureValues
|
||||
if kp.Spec.Aws.AccessKeyID.SecureValueName != "" {
|
||||
secureValues[kp.Spec.Aws.AccessKeyID.SecureValueName] = struct{}{}
|
||||
}
|
||||
|
||||
if kp.Spec.Aws.AccessKey.AccessKeyID.SecureValueName != "" {
|
||||
secureValues[kp.Spec.Aws.AccessKey.AccessKeyID.SecureValueName] = struct{}{}
|
||||
}
|
||||
|
||||
if kp.Spec.Aws.AccessKey.SecretAccessKey.SecureValueName != "" {
|
||||
secureValues[kp.Spec.Aws.AccessKey.SecretAccessKey.SecureValueName] = struct{}{}
|
||||
if kp.Spec.Aws.SecretAccessKey.SecureValueName != "" {
|
||||
secureValues[kp.Spec.Aws.SecretAccessKey.SecureValueName] = struct{}{}
|
||||
}
|
||||
|
||||
return secureValues
|
||||
@@ -288,13 +284,13 @@ func extractSecureValues(kp *secretv1beta1.Keeper) map[string]struct{} {
|
||||
func getKeeperConfig(keeper *secretv1beta1.Keeper) secretv1beta1.KeeperConfig {
|
||||
switch keeper.Spec.GetType() {
|
||||
case secretv1beta1.AWSKeeperType:
|
||||
return secretv1beta1.NewNamedKeeperConfig(keeper.Name, keeper.Spec.Aws)
|
||||
return keeper.Spec.Aws
|
||||
case secretv1beta1.AzureKeeperType:
|
||||
return secretv1beta1.NewNamedKeeperConfig(keeper.Name, keeper.Spec.Azure)
|
||||
return keeper.Spec.Azure
|
||||
case secretv1beta1.GCPKeeperType:
|
||||
return secretv1beta1.NewNamedKeeperConfig(keeper.Name, keeper.Spec.Gcp)
|
||||
return keeper.Spec.Gcp
|
||||
case secretv1beta1.HashiCorpKeeperType:
|
||||
return secretv1beta1.NewNamedKeeperConfig(keeper.Name, keeper.Spec.HashiCorpVault)
|
||||
return keeper.Spec.HashiCorpVault
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -609,7 +609,7 @@ func (s *keeperMetadataStorage) GetKeeperConfig(ctx context.Context, namespace s
|
||||
|
||||
// Check if keeper is the systemwide one.
|
||||
if name == contracts.SystemKeeperName {
|
||||
return secretv1beta1.NewNamedKeeperConfig(contracts.SystemKeeperName, &secretv1beta1.SystemKeeperConfig{}), nil
|
||||
return &secretv1beta1.SystemKeeperConfig{}, nil
|
||||
}
|
||||
|
||||
// Load keeper config from metadata store, or TODO: keeper cache.
|
||||
@@ -618,7 +618,7 @@ func (s *keeperMetadataStorage) GetKeeperConfig(ctx context.Context, namespace s
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keeperConfig := parseKeeperConfigJson(kp.Name, secretv1beta1.KeeperType(kp.Type), kp.Payload)
|
||||
keeperConfig := toProvider(secretv1beta1.KeeperType(kp.Type), kp.Payload)
|
||||
|
||||
// TODO: this would be a good place to check if credentials are secure values and load them.
|
||||
return keeperConfig, nil
|
||||
@@ -636,6 +636,13 @@ func (s *keeperMetadataStorage) SetAsActive(ctx context.Context, namespace xkube
|
||||
return fmt.Errorf("template %q: %w", sqlKeeperSetAsActive.Name(), err)
|
||||
}
|
||||
|
||||
// Check keeper exists. No need to worry about time of check to time of use
|
||||
// since trying to activate a just deleted keeper will result in all
|
||||
// keepers being inactive and defaulting to the system keeper.
|
||||
if _, err := s.read(ctx, namespace.String(), name, contracts.ReadOpts{}); err != nil {
|
||||
return fmt.Errorf("reading keeper before setting as active: %w", err)
|
||||
}
|
||||
|
||||
_, err = s.db.ExecContext(ctx, query, req.GetArgs()...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting keeper as active %q: %w", query, err)
|
||||
@@ -719,7 +726,7 @@ func (s *keeperMetadataStorage) GetActiveKeeperConfig(ctx context.Context, names
|
||||
if err != nil {
|
||||
// When there are not active keepers, default to the system keeper
|
||||
if errors.Is(err, contracts.ErrKeeperNotFound) {
|
||||
return contracts.SystemKeeperName, secretv1beta1.NewNamedKeeperConfig(contracts.SystemKeeperName, &secretv1beta1.SystemKeeperConfig{}), nil
|
||||
return contracts.SystemKeeperName, &secretv1beta1.SystemKeeperConfig{}, nil
|
||||
}
|
||||
return "", nil, fmt.Errorf("fetching active keeper from db: %w", err)
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user