Compare commits

...

6 Commits

Author SHA1 Message Date
Josh Hunt
83d342771d CI: Additional changes for +security versions (#94854)
* Build: Fix docker manifest create not using correct IMAGE_TAG

* Support publishing security versions of NPM packages

---------

Co-authored-by: Andreas Christou <andreas.christou@grafana.com>
Co-authored-by: Kevin Minehart <kmineh0151@gmail.com>
Co-authored-by: Diego Augusto Molina <diegoaugustomolina@gmail.com>
2024-11-12 08:23:37 -06:00
Kevin Minehart
1a60a3483e [v11.2.x] CI: Support more version formats in publishing (#94749)
CI: Support more version formats in publishing (#94575)

* cleanup dead code
* add tests and rewrite publish grafanacom steps to reuse
* add pkg/build tests; don't upload CDN assets on grafana releases

(cherry picked from commit 7a2edd35d5)
(cherry picked from commit 39d9542f7f)
2024-11-12 07:50:54 -06:00
github-actions[bot]
5ddc329279 apply security patch: v11.3.x/194-202410181505.patch
commit 1395b7ccf1656ff6cfd3aed5eb9b5163861fe3b2
Author: Leandro Deveikis <leandro.deveikis@gmail.com>
Date:   Fri Oct 11 14:44:30 2024 -0300

    Restrict dashboards, folders and datasources by the org id of the signed in user
2024-10-24 14:40:28 -07:00
Josh Hunt
d9455ff7db bump whatsnew url (#94982) 2024-10-18 17:49:50 +01:00
Sam Jewell
5221584143 [v11.3.x] ServerSideExpressions: Disable SQL Expressions to prevent RCE and LFI vulnerability (#94955)
ServerSideExpressions: Disable SQL Expressions to prevent RCE and LFI vulnerability (#94942)

* disable sql expressions

remove duckdb ref

* Run `make update-workspace`

---------

Co-authored-by: Scott Lepper <scott.lepper@gmail.com>
(cherry picked from commit ea71201ddc)
2024-10-18 15:41:25 +01:00
grafana-delivery-bot[bot]
f6ac93578a [v11.3.x] Dashboards: Links to explore should respect subpath (#94975)
Dashboards: Links to explore should respect subpath (#94525)

* Links to explore should respect subpath

* Change to using assureBaseUrl

* Change back to normal single quotes

(cherry picked from commit 315778227b)

Co-authored-by: Oscar Kilhed <oscar.kilhed@grafana.com>
2024-10-18 15:40:55 +01:00
95 changed files with 1423 additions and 5258 deletions

View File

@@ -71,18 +71,10 @@ steps:
- echo $DRONE_RUNNER_NAME
image: alpine:3.20.3
name: identify-runner
- commands:
- go build -o ./bin/build -ldflags '-extldflags -static' ./pkg/build/cmd
depends_on: []
environment:
CGO_ENABLED: 0
image: golang:1.23.1-alpine
name: compile-build-cmd
- commands:
- go install github.com/bazelbuild/buildtools/buildifier@latest
- buildifier --lint=warn -mode=check -r .
depends_on:
- compile-build-cmd
depends_on: []
image: golang:1.23.1-alpine
name: lint-starlark
trigger:
@@ -547,7 +539,7 @@ steps:
name: identify-runner
- commands:
- mkdir -p bin
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.53/grabpl
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.56/grabpl
- chmod +x bin/grabpl
image: byrnedo/alpine-curl:0.1.8
name: grabpl
@@ -1003,7 +995,7 @@ steps:
name: clone-enterprise
- commands:
- mkdir -p bin
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.53/grabpl
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.56/grabpl
- chmod +x bin/grabpl
image: byrnedo/alpine-curl:0.1.8
name: grabpl
@@ -1278,13 +1270,6 @@ platform:
os: linux
services: []
steps:
- commands:
- go build -o ./bin/build -ldflags '-extldflags -static' ./pkg/build/cmd
depends_on: []
environment:
CGO_ENABLED: 0
image: golang:1.23.1-alpine
name: compile-build-cmd
- commands:
- apt-get update -yq && apt-get install shellcheck
- shellcheck -e SC1071 -e SC2162 scripts/**/*.sh
@@ -1972,7 +1957,7 @@ steps:
name: identify-runner
- commands:
- mkdir -p bin
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.53/grabpl
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.56/grabpl
- chmod +x bin/grabpl
image: byrnedo/alpine-curl:0.1.8
name: grabpl
@@ -2525,7 +2510,7 @@ services:
steps:
- commands:
- mkdir -p bin
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.53/grabpl
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.56/grabpl
- chmod +x bin/grabpl
image: byrnedo/alpine-curl:0.1.8
name: grabpl
@@ -2730,7 +2715,7 @@ steps:
name: identify-runner
- commands:
- $$ProgressPreference = "SilentlyContinue"
- Invoke-WebRequest https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.53/windows/grabpl.exe
- Invoke-WebRequest https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.56/windows/grabpl.exe
-OutFile grabpl.exe
image: grafana/ci-wix:0.1.1
name: windows-init
@@ -3157,7 +3142,7 @@ services:
steps:
- commands:
- mkdir -p bin
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.53/grabpl
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.56/grabpl
- chmod +x bin/grabpl
image: byrnedo/alpine-curl:0.1.8
name: grabpl
@@ -3402,7 +3387,7 @@ steps:
name: identify-runner
- commands:
- mkdir -p bin
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.53/grabpl
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.56/grabpl
- chmod +x bin/grabpl
image: byrnedo/alpine-curl:0.1.8
name: grabpl
@@ -3434,31 +3419,32 @@ steps:
- |2-
bash -c '
IMAGE_TAG=$(echo "$${TAG}" | sed -e "s/+/-/g")
debug=
if [[ -n $${DRY_RUN} ]]; then debug=echo; fi
docker login -u $${DOCKER_USER} -p $${DOCKER_PASSWORD}
# Push the grafana-image-tags images
$$debug docker push grafana/grafana-image-tags:$${TAG}-amd64
$$debug docker push grafana/grafana-image-tags:$${TAG}-arm64
$$debug docker push grafana/grafana-image-tags:$${TAG}-armv7
$$debug docker push grafana/grafana-image-tags:$${TAG}-ubuntu-amd64
$$debug docker push grafana/grafana-image-tags:$${TAG}-ubuntu-arm64
$$debug docker push grafana/grafana-image-tags:$${TAG}-ubuntu-armv7
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-amd64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-arm64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-armv7
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-amd64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-arm64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-armv7
# Create the grafana manifests
$$debug docker manifest create grafana/grafana:${TAG} grafana/grafana-image-tags:$${TAG}-amd64 grafana/grafana-image-tags:$${TAG}-arm64 grafana/grafana-image-tags:$${TAG}-armv7
$$debug docker manifest create grafana/grafana:$${IMAGE_TAG} grafana/grafana-image-tags:$${IMAGE_TAG}-amd64 grafana/grafana-image-tags:$${IMAGE_TAG}-arm64 grafana/grafana-image-tags:$${IMAGE_TAG}-armv7
$$debug docker manifest create grafana/grafana:${TAG}-ubuntu grafana/grafana-image-tags:$${TAG}-ubuntu-amd64 grafana/grafana-image-tags:$${TAG}-ubuntu-arm64 grafana/grafana-image-tags:$${TAG}-ubuntu-armv7
$$debug docker manifest create grafana/grafana:$${IMAGE_TAG}-ubuntu grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-amd64 grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-arm64 grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-armv7
# Push the grafana manifests
$$debug docker manifest push grafana/grafana:$${TAG}
$$debug docker manifest push grafana/grafana:$${TAG}-ubuntu
$$debug docker manifest push grafana/grafana:$${IMAGE_TAG}
$$debug docker manifest push grafana/grafana:$${IMAGE_TAG}-ubuntu
# if LATEST is set, then also create & push latest
if [[ -n $${LATEST} ]]; then
$$debug docker manifest create grafana/grafana:latest grafana/grafana-image-tags:$${TAG}-amd64 grafana/grafana-image-tags:$${TAG}-arm64 grafana/grafana-image-tags:$${TAG}-armv7
$$debug docker manifest create grafana/grafana:latest-ubuntu grafana/grafana-image-tags:$${TAG}-ubuntu-amd64 grafana/grafana-image-tags:$${TAG}-ubuntu-arm64 grafana/grafana-image-tags:$${TAG}-ubuntu-armv7
$$debug docker manifest create grafana/grafana:latest grafana/grafana-image-tags:$${IMAGE_TAG}-amd64 grafana/grafana-image-tags:$${IMAGE_TAG}-arm64 grafana/grafana-image-tags:$${IMAGE_TAG}-armv7
$$debug docker manifest create grafana/grafana:latest-ubuntu grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-amd64 grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-arm64 grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-armv7
$$debug docker manifest push grafana/grafana:latest
$$debug docker manifest push grafana/grafana:latest-ubuntu
@@ -3533,7 +3519,7 @@ steps:
name: identify-runner
- commands:
- mkdir -p bin
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.53/grabpl
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.56/grabpl
- chmod +x bin/grabpl
image: byrnedo/alpine-curl:0.1.8
name: grabpl
@@ -3565,31 +3551,32 @@ steps:
- |2-
bash -c '
IMAGE_TAG=$(echo "$${TAG}" | sed -e "s/+/-/g")
debug=
if [[ -n $${DRY_RUN} ]]; then debug=echo; fi
docker login -u $${DOCKER_USER} -p $${DOCKER_PASSWORD}
# Push the grafana-image-tags images
$$debug docker push grafana/grafana-image-tags:$${TAG}-amd64
$$debug docker push grafana/grafana-image-tags:$${TAG}-arm64
$$debug docker push grafana/grafana-image-tags:$${TAG}-armv7
$$debug docker push grafana/grafana-image-tags:$${TAG}-ubuntu-amd64
$$debug docker push grafana/grafana-image-tags:$${TAG}-ubuntu-arm64
$$debug docker push grafana/grafana-image-tags:$${TAG}-ubuntu-armv7
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-amd64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-arm64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-armv7
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-amd64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-arm64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-armv7
# Create the grafana manifests
$$debug docker manifest create grafana/grafana:${TAG} grafana/grafana-image-tags:$${TAG}-amd64 grafana/grafana-image-tags:$${TAG}-arm64 grafana/grafana-image-tags:$${TAG}-armv7
$$debug docker manifest create grafana/grafana:$${IMAGE_TAG} grafana/grafana-image-tags:$${IMAGE_TAG}-amd64 grafana/grafana-image-tags:$${IMAGE_TAG}-arm64 grafana/grafana-image-tags:$${IMAGE_TAG}-armv7
$$debug docker manifest create grafana/grafana:${TAG}-ubuntu grafana/grafana-image-tags:$${TAG}-ubuntu-amd64 grafana/grafana-image-tags:$${TAG}-ubuntu-arm64 grafana/grafana-image-tags:$${TAG}-ubuntu-armv7
$$debug docker manifest create grafana/grafana:$${IMAGE_TAG}-ubuntu grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-amd64 grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-arm64 grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-armv7
# Push the grafana manifests
$$debug docker manifest push grafana/grafana:$${TAG}
$$debug docker manifest push grafana/grafana:$${TAG}-ubuntu
$$debug docker manifest push grafana/grafana:$${IMAGE_TAG}
$$debug docker manifest push grafana/grafana:$${IMAGE_TAG}-ubuntu
# if LATEST is set, then also create & push latest
if [[ -n $${LATEST} ]]; then
$$debug docker manifest create grafana/grafana:latest grafana/grafana-image-tags:$${TAG}-amd64 grafana/grafana-image-tags:$${TAG}-arm64 grafana/grafana-image-tags:$${TAG}-armv7
$$debug docker manifest create grafana/grafana:latest-ubuntu grafana/grafana-image-tags:$${TAG}-ubuntu-amd64 grafana/grafana-image-tags:$${TAG}-ubuntu-arm64 grafana/grafana-image-tags:$${TAG}-ubuntu-armv7
$$debug docker manifest create grafana/grafana:latest grafana/grafana-image-tags:$${IMAGE_TAG}-amd64 grafana/grafana-image-tags:$${IMAGE_TAG}-arm64 grafana/grafana-image-tags:$${IMAGE_TAG}-armv7
$$debug docker manifest create grafana/grafana:latest-ubuntu grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-amd64 grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-arm64 grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-armv7
$$debug docker manifest push grafana/grafana:latest
$$debug docker manifest push grafana/grafana:latest-ubuntu
@@ -3681,7 +3668,8 @@ steps:
image: golang:1.23.1-alpine
name: compile-build-cmd
- commands:
- ./bin/build artifacts packages --tag $${DRONE_TAG} --src-bucket $${PRERELEASE_BUCKET}
- ./bin/build artifacts packages --artifacts-editions=oss --tag $${DRONE_TAG} --src-bucket
$${PRERELEASE_BUCKET}
depends_on:
- compile-build-cmd
environment:
@@ -3691,19 +3679,6 @@ steps:
from_secret: prerelease_bucket
image: grafana/grafana-ci-deploy:1.3.3
name: publish-artifacts
- commands:
- ./bin/build artifacts static-assets --tag ${DRONE_TAG} --static-asset-editions=grafana-oss
depends_on:
- compile-build-cmd
environment:
GCP_KEY:
from_secret: gcp_grafanauploads_base64
PRERELEASE_BUCKET:
from_secret: prerelease_bucket
STATIC_ASSET_EDITIONS:
from_secret: static_asset_editions
image: grafana/grafana-ci-deploy:1.3.3
name: publish-static-assets
- commands:
- ./bin/build artifacts storybook --tag ${DRONE_TAG}
depends_on:
@@ -3723,7 +3698,6 @@ steps:
-f latest=$${LATEST} --repo=grafana/grafana release-pr.yml
depends_on:
- publish-artifacts
- publish-static-assets
environment:
GH_CLI_URL: https://github.com/cli/cli/releases/download/v2.50.0/gh_2.50.0_linux_amd64.tar.gz
GITHUB_TOKEN:
@@ -3855,6 +3829,7 @@ platform:
services: []
steps:
- commands:
- export version=$(echo ${TAG} | sed -e "s/+security-/-/g")
- 'echo "Step 1: Updating package lists..."'
- apt-get update >/dev/null 2>&1
- 'echo "Step 2: Installing prerequisites..."'
@@ -3870,7 +3845,7 @@ steps:
- 'echo "Step 5: Installing Grafana..."'
- for i in $(seq 1 60); do
- ' if apt-get update >/dev/null 2>&1 && DEBIAN_FRONTEND=noninteractive apt-get
install -yq grafana=${TAG} >/dev/null 2>&1; then'
install -yq grafana=$version >/dev/null 2>&1; then'
- ' echo "Command succeeded on attempt $i"'
- ' break'
- ' else'
@@ -3884,10 +3859,10 @@ steps:
- ' fi'
- done
- 'echo "Step 6: Verifying Grafana installation..."'
- 'if dpkg -s grafana | grep -q "Version: ${TAG}"; then'
- ' echo "Successfully verified Grafana version ${TAG}"'
- 'if dpkg -s grafana | grep -q "Version: $version"; then'
- ' echo "Successfully verified Grafana version $version"'
- else
- ' echo "Failed to verify Grafana version ${TAG}"'
- ' echo "Failed to verify Grafana version $version"'
- ' exit 1'
- fi
- echo "Verification complete."
@@ -3915,11 +3890,12 @@ steps:
sslcacert=/etc/pki/tls/certs/ca-bundle.crt
' > /etc/yum.repos.d/grafana.repo
- 'echo "Step 5: Checking RPM repository..."'
- dnf list available grafana-${TAG}
- export version=$(echo "${TAG}" | sed -e "s/+security-/^security_/g")
- dnf list available grafana-$version
- if [ $? -eq 0 ]; then
- ' echo "Grafana package found in repository. Installing from repo..."'
- for i in $(seq 1 60); do
- ' if dnf install -y --nogpgcheck grafana-${TAG} >/dev/null 2>&1; then'
- ' if dnf install -y --nogpgcheck grafana-$version >/dev/null 2>&1; then'
- ' echo "Command succeeded on attempt $i"'
- ' break'
- ' else'
@@ -3936,16 +3912,16 @@ steps:
- ' rpm --import https://rpm.grafana.com/gpg.key'
- ' rpm -qa gpg-pubkey* | xargs rpm -qi | grep -i grafana'
- else
- ' echo "Grafana package version ${TAG} not found in repository."'
- ' echo "Grafana package version $version not found in repository."'
- ' dnf repolist'
- ' dnf list available grafana*'
- ' exit 1'
- fi
- 'echo "Step 6: Verifying Grafana installation..."'
- if rpm -q grafana | grep -q "${TAG}"; then
- ' echo "Successfully verified Grafana version ${TAG}"'
- if rpm -q grafana | grep -q "$version"; then
- ' echo "Successfully verified Grafana version $version"'
- else
- ' echo "Failed to verify Grafana version ${TAG}"'
- ' echo "Failed to verify Grafana version $version"'
- ' exit 1'
- fi
- echo "Verification complete."
@@ -4032,6 +4008,7 @@ steps:
from_secret: packages_service_account
target_bucket: grafana-packages
- commands:
- export version=$(echo ${TAG} | sed -e "s/+security-/-/g")
- 'echo "Step 1: Updating package lists..."'
- apt-get update >/dev/null 2>&1
- 'echo "Step 2: Installing prerequisites..."'
@@ -4047,7 +4024,7 @@ steps:
- 'echo "Step 5: Installing Grafana..."'
- for i in $(seq 1 60); do
- ' if apt-get update >/dev/null 2>&1 && DEBIAN_FRONTEND=noninteractive apt-get
install -yq grafana=${TAG} >/dev/null 2>&1; then'
install -yq grafana=$version >/dev/null 2>&1; then'
- ' echo "Command succeeded on attempt $i"'
- ' break'
- ' else'
@@ -4061,10 +4038,10 @@ steps:
- ' fi'
- done
- 'echo "Step 6: Verifying Grafana installation..."'
- 'if dpkg -s grafana | grep -q "Version: ${TAG}"; then'
- ' echo "Successfully verified Grafana version ${TAG}"'
- 'if dpkg -s grafana | grep -q "Version: $version"; then'
- ' echo "Successfully verified Grafana version $version"'
- else
- ' echo "Failed to verify Grafana version ${TAG}"'
- ' echo "Failed to verify Grafana version $version"'
- ' exit 1'
- fi
- echo "Verification complete."
@@ -4093,11 +4070,12 @@ steps:
sslcacert=/etc/pki/tls/certs/ca-bundle.crt
' > /etc/yum.repos.d/grafana.repo
- 'echo "Step 5: Checking RPM repository..."'
- dnf list available grafana-${TAG}
- export version=$(echo "${TAG}" | sed -e "s/+security-/^security_/g")
- dnf list available grafana-$version
- if [ $? -eq 0 ]; then
- ' echo "Grafana package found in repository. Installing from repo..."'
- for i in $(seq 1 60); do
- ' if dnf install -y --nogpgcheck grafana-${TAG} >/dev/null 2>&1; then'
- ' if dnf install -y --nogpgcheck grafana-$version >/dev/null 2>&1; then'
- ' echo "Command succeeded on attempt $i"'
- ' break'
- ' else'
@@ -4114,16 +4092,16 @@ steps:
- ' rpm --import https://rpm.grafana.com/gpg.key'
- ' rpm -qa gpg-pubkey* | xargs rpm -qi | grep -i grafana'
- else
- ' echo "Grafana package version ${TAG} not found in repository."'
- ' echo "Grafana package version $version not found in repository."'
- ' dnf repolist'
- ' dnf list available grafana*'
- ' exit 1'
- fi
- 'echo "Step 6: Verifying Grafana installation..."'
- if rpm -q grafana | grep -q "${TAG}"; then
- ' echo "Successfully verified Grafana version ${TAG}"'
- if rpm -q grafana | grep -q "$version"; then
- ' echo "Successfully verified Grafana version $version"'
- else
- ' echo "Failed to verify Grafana version ${TAG}"'
- ' echo "Failed to verify Grafana version $version"'
- ' exit 1'
- fi
- echo "Verification complete."
@@ -4421,7 +4399,7 @@ steps:
name: identify-runner
- commands:
- $$ProgressPreference = "SilentlyContinue"
- Invoke-WebRequest https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.53/windows/grabpl.exe
- Invoke-WebRequest https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.56/windows/grabpl.exe
-OutFile grabpl.exe
image: grafana/ci-wix:0.1.1
name: windows-init
@@ -5221,7 +5199,7 @@ services:
steps:
- commands:
- mkdir -p bin
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.53/grabpl
- curl -fL -o bin/grabpl https://grafana-downloads.storage.googleapis.com/grafana-build-pipeline/v3.0.56/grabpl
- chmod +x bin/grabpl
image: byrnedo/alpine-curl:0.1.8
name: grabpl
@@ -6013,6 +5991,6 @@ kind: secret
name: gcr_credentials
---
kind: signature
hmac: e618274ea7a8bfbf3d5e151d459348aa9382fe63fe7fef76c997db3cba74779f
hmac: 41df5b1fdbd1b3c9aa915919ae5be16d2a188cbaf6b243c14fd66f94db0db8d8
...

8
go.mod
View File

@@ -144,7 +144,6 @@ require (
github.com/redis/go-redis/v9 v9.1.0 // @grafana/alerting-backend
github.com/robfig/cron/v3 v3.0.1 // @grafana/grafana-backend-group
github.com/russellhaering/goxmldsig v1.4.0 // @grafana/grafana-backend-group
github.com/scottlepp/go-duck v0.1.0 // @grafana/grafana-app-platform-squad
github.com/spf13/cobra v1.8.1 // @grafana/grafana-app-platform-squad
github.com/spf13/pflag v1.0.5 // @grafana-app-platform-squad
github.com/spyzhov/ajson v0.9.0 // @grafana/grafana-app-platform-squad
@@ -222,7 +221,6 @@ require (
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/DATA-DOG/go-sqlmock v1.5.2 // @grafana/grafana-search-and-storage
github.com/FZambia/eagle v0.1.0 // indirect
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/RoaringBitmap/roaring v1.9.3 // indirect
@@ -230,7 +228,6 @@ require (
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/apache/thrift v0.20.0 // indirect
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
@@ -338,7 +335,6 @@ require (
github.com/jpillora/backoff v1.0.0 // indirect
github.com/jszwedko/go-datemath v0.1.1-0.20230526204004-640a500621d6 // indirect
github.com/karlseguin/ccache/v3 v3.0.5 // indirect
github.com/klauspost/asmfmt v1.3.2 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/kr/text v0.2.0 // indirect
@@ -354,8 +350,6 @@ require (
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mfridman/interpolate v0.0.2 // indirect
github.com/miekg/dns v1.1.59 // indirect
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
@@ -488,8 +482,6 @@ require (
github.com/grafana/grafana-app-sdk v0.19.0 // indirect
github.com/grafana/grafana/pkg/semconv v0.0.0-20240808213237-f4d2e064f435 // indirect
github.com/grafana/sqlds/v4 v4.1.0 // indirect
github.com/hairyhenderson/go-which v0.2.0 // indirect
github.com/iancoleman/orderedmap v0.3.0 // indirect
github.com/maypok86/otter v1.2.2 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/shadowspore/fossil-delta v0.0.0-20240102155221-e3a8590b820b // indirect

15
go.sum
View File

@@ -1434,7 +1434,6 @@ github.com/FZambia/eagle v0.1.0/go.mod h1:YjGSPVkQTNcVLfzEUQJNgW9ScPR0K4u/Ky0yeF
github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU=
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
github.com/KimMachineGun/automemlimit v0.6.0/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
@@ -1519,14 +1518,10 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
github.com/apache/thrift v0.17.0/go.mod h1:OLxhMRJxomX+1I/KUw03qoV3mMz16BwaKI+d4fPBx7Q=
github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI=
github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de h1:FxWPpzIjnTlhPwqqXc4/vE0f7GvRjuAsbW+HOIe8KnA=
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -2353,8 +2348,6 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYp
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hairyhenderson/go-which v0.2.0 h1:vxoCKdgYc6+MTBzkJYhWegksHjjxuXPNiqo5G2oBM+4=
github.com/hairyhenderson/go-which v0.2.0/go.mod h1:U1BQQRCjxYHfOkXDyCgst7OZVknbqI7KuGKhGnmyIik=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
@@ -2459,8 +2452,6 @@ github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4
github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo=
github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJn+Ichc=
github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -2569,7 +2560,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
@@ -2698,9 +2688,7 @@ github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJys
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
@@ -3083,8 +3071,6 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/scottlepp/go-duck v0.1.0 h1:Lfunl1wd767v0dF0/dr+mBh+KnUFuDmgNycC76NJjeE=
github.com/scottlepp/go-duck v0.1.0/go.mod h1:xGoYUbgph5AbxwsMElWv2i/mgzQl89WIgwE69Ytml7Q=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
@@ -4537,7 +4523,6 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -270,8 +270,6 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNL
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E=
github.com/Azure/go-amqp v1.0.5 h1:po5+ljlcNSU8xtapHTe8gIc8yHxCzC03E8afH2g1ftU=
github.com/Azure/go-amqp v1.0.5/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=
@@ -294,6 +292,7 @@ github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0 h1:oVLqHXhnYtUwM89y9T1
github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0=
github.com/IBM/sarama v1.43.0 h1:YFFDn8mMI2QL0wOrG0J2sFoVIAFl7hS9JQi2YZsXtJc=
github.com/IBM/sarama v1.43.0/go.mod h1:zlE6HEbC/SMQ9mhEYaF7nNLYOUyrs0obySKCckWP9BM=
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU=
github.com/Joker/jade v1.1.3 h1:Qbeh12Vq6BxURXT1qZBRHsDxeURB8ztcL6f3EXSGeHk=
github.com/Joker/jade v1.1.3/go.mod h1:T+2WLyt7VH6Lp0TRxQrUYEs64nRc83wkMQrfeIQKduM=
github.com/KimMachineGun/automemlimit v0.6.0 h1:p/BXkH+K40Hax+PuWWPQ478hPjsp9h1CPDhLlA3Z37E=
@@ -342,6 +341,8 @@ github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEq
github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 h1:goHVqTbFX3AIo0tzGr14pgfAW2ZfPChKO21Z9MGf/gk=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ=
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
github.com/apache/arrow/go/v10 v10.0.1 h1:n9dERvixoC/1JjDmBcs9FPaEryoANa2sCgVFo6ez9cI=
@@ -350,6 +351,8 @@ github.com/apache/arrow/go/v12 v12.0.1 h1:JsR2+hzYYjgSUkBSaahpqCetqZMr76djX80fF/
github.com/apache/arrow/go/v13 v13.0.0 h1:kELrvDQuKZo8csdWYqBQfyi431x6Zs/YJTEgUuSVcWk=
github.com/apache/arrow/go/v13 v13.0.0/go.mod h1:W69eByFNO0ZR30q1/7Sr9d83zcVZmF2MiP3fFYAWJOc=
github.com/apache/arrow/go/v14 v14.0.2 h1:N8OkaJEOfI3mEZt07BIkvo4sC6XDbL+48MBPWO5IONw=
github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI=
github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8=
github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 h1:ZSTrOEhiM5J5RFxEaFvMZVEAM1KvT1YzbEOwB2EAGjA=
github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
@@ -561,10 +564,6 @@ github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs0
github.com/google/go-jsonnet v0.18.0 h1:/6pTy6g+Jh1a1I2UMoAODkqELFiVIdOxbNwv0DDzoOg=
github.com/google/go-jsonnet v0.18.0/go.mod h1:C3fTzyVJDslXdiTqw/bTFk7vSGyCtH3MGRbDfvEwGd0=
github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9 h1:OF1IPgv+F4NmqmJ98KTjdN97Vs1JxDPB3vbmYzV2dpk=
github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV4uIlFds5weTmxbo=
github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI=
github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk=
github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg=
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
@@ -662,6 +661,7 @@ github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3ro
github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46 h1:veS9QfglfvqAw2e+eeNT/SbGySq8ajECXJ9e4fPoLhY=
github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
@@ -703,6 +703,8 @@ github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04
github.com/microcosm-cc/bluemonday v1.0.25 h1:4NEwSfiJ+Wva0VxN5B8OwMicaJvD8r9tlJWm9rtloEg=
github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng=
github.com/mitchellh/gox v0.4.0 h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc=
@@ -1022,6 +1024,7 @@ golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5D
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/image v0.6.0 h1:bR8b5okrPI3g/gyZakLZHeWxAR8Dn5CyxXv1hLH5g/4=
golang.org/x/image v0.6.0/go.mod h1:MXLdDR43H7cDJq5GEGXEVeeNhPgi+YYEQ2pC1byI1x0=
@@ -1049,8 +1052,8 @@ golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=

View File

@@ -65,7 +65,7 @@
"generate-apis": "rtk-query-codegen-openapi ./scripts/generate-rtk-apis.ts"
},
"grafana": {
"whatsNewUrl": "https://grafana.com/docs/grafana/next/whatsnew/whats-new-in-v11-0/",
"whatsNewUrl": "https://grafana.com/docs/grafana/next/whatsnew/whats-new-in-v11-3/",
"releaseNotesUrl": "https://grafana.com/docs/grafana/next/release-notes/"
},
"devDependencies": {

View File

@@ -11,6 +11,8 @@ import (
"strconv"
"strings"
"time"
"github.com/urfave/cli/v2"
)
const (
@@ -30,6 +32,12 @@ func logError(message string, err error) int {
return 1
}
func RunCmdCLI(c *cli.Context) error {
os.Exit(RunCmd())
return nil
}
// RunCmd runs the build command and returns the exit code
func RunCmd() int {
opts := BuildOptsFromFlags()

View File

@@ -2,20 +2,6 @@ package main
import "github.com/urfave/cli/v2"
// ArgCountWrapper will cause the action to fail if there were not exactly `num` args provided.
func ArgCountWrapper(num int, action cli.ActionFunc) cli.ActionFunc {
return func(ctx *cli.Context) error {
if ctx.NArg() != num {
if err := cli.ShowSubcommandHelp(ctx); err != nil {
return cli.Exit(err.Error(), 1)
}
return cli.Exit("", 1)
}
return action(ctx)
}
}
// ArgCountWrapper will cause the action to fail if there were more than `num` args provided.
func MaxArgCountWrapper(max int, action cli.ActionFunc) cli.ActionFunc {
return func(ctx *cli.Context) error {

View File

@@ -1,68 +0,0 @@
package main
import (
"fmt"
"log"
"github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/build/compilers"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/errutil"
"github.com/grafana/grafana/pkg/build/grafana"
"github.com/grafana/grafana/pkg/build/syncutil"
)
func BuildBackend(ctx *cli.Context) error {
metadata, err := config.GenerateMetadata(ctx)
if err != nil {
return err
}
version := metadata.GrafanaVersion
var (
edition = config.Edition(ctx.String("edition"))
cfg = config.Config{
NumWorkers: ctx.Int("jobs"),
}
)
buildConfig, err := config.GetBuildConfig(metadata.ReleaseMode.Mode)
if err != nil {
return fmt.Errorf("could not get version / package info for mode '%s': %w", metadata.ReleaseMode.Mode, err)
}
const grafanaDir = "."
log.Printf("Building Grafana back-end, version %q, %s edition, variants [%v]",
version, edition, buildConfig.Variants)
p := syncutil.NewWorkerPool(cfg.NumWorkers)
defer p.Close()
if err := compilers.Install(); err != nil {
return cli.Exit(err.Error(), 1)
}
g, _ := errutil.GroupWithContext(ctx.Context)
for _, variant := range buildConfig.Variants {
variant := variant
opts := grafana.BuildVariantOpts{
Variant: variant,
Edition: edition,
Version: version,
GrafanaDir: grafanaDir,
}
p.Schedule(g.Wrap(func() error {
return grafana.BuildVariant(ctx.Context, opts)
}))
}
if err := g.Wait(); err != nil {
return cli.Exit(err.Error(), 1)
}
log.Println("Successfully built back-end binaries!")
return nil
}

View File

@@ -1,51 +0,0 @@
package main
import (
"log"
"github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/docker"
"github.com/grafana/grafana/pkg/build/gcloud"
)
func BuildDocker(c *cli.Context) error {
if err := docker.Init(); err != nil {
return err
}
metadata, err := config.GenerateMetadata(c)
if err != nil {
return err
}
useUbuntu := c.Bool("ubuntu")
buildConfig, err := config.GetBuildConfig(metadata.ReleaseMode.Mode)
if err != nil {
return err
}
shouldSave := buildConfig.Docker.ShouldSave
if shouldSave {
if err := gcloud.ActivateServiceAccount(); err != nil {
return err
}
}
edition := config.Edition(c.String("edition"))
version := metadata.GrafanaVersion
log.Printf("Building Docker images, version %s, %s edition, Ubuntu based: %v...", version, edition,
useUbuntu)
for _, arch := range buildConfig.Docker.Architectures {
if _, err := docker.BuildImage(version, arch, ".", useUbuntu, shouldSave, edition, metadata.ReleaseMode.Mode); err != nil {
return cli.Exit(err.Error(), 1)
}
}
log.Println("Successfully built Docker images!")
return nil
}

View File

@@ -1,39 +0,0 @@
package main
import (
"log"
"github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/errutil"
"github.com/grafana/grafana/pkg/build/frontend"
"github.com/grafana/grafana/pkg/build/syncutil"
)
func BuildFrontend(c *cli.Context) error {
metadata, err := config.GenerateMetadata(c)
if err != nil {
return err
}
cfg, mode, err := frontend.GetConfig(c, metadata)
if err != nil {
return err
}
p := syncutil.NewWorkerPool(cfg.NumWorkers)
defer p.Close()
g, _ := errutil.GroupWithContext(c.Context)
if err := frontend.Build(mode, frontend.GrafanaDir, p, g); err != nil {
return err
}
if err := g.Wait(); err != nil {
return err
}
log.Println("Successfully built Grafana front-end!")
return nil
}

View File

@@ -1,38 +0,0 @@
package main
import (
"log"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/errutil"
"github.com/grafana/grafana/pkg/build/frontend"
"github.com/grafana/grafana/pkg/build/syncutil"
"github.com/urfave/cli/v2"
)
func BuildFrontendPackages(c *cli.Context) error {
metadata, err := config.GenerateMetadata(c)
if err != nil {
return err
}
cfg, mode, err := frontend.GetConfig(c, metadata)
if err != nil {
return err
}
p := syncutil.NewWorkerPool(cfg.NumWorkers)
defer p.Close()
g, _ := errutil.GroupWithContext(c.Context)
if err := frontend.BuildFrontendPackages(cfg.PackageVersion, mode, frontend.GrafanaDir, p, g); err != nil {
return cli.Exit(err.Error(), 1)
}
if err := g.Wait(); err != nil {
return cli.Exit(err.Error(), 1)
}
log.Println("Successfully built Grafana front-end packages!")
return nil
}

View File

@@ -1,53 +0,0 @@
package main
import (
"context"
"log"
"github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/errutil"
"github.com/grafana/grafana/pkg/build/plugins"
"github.com/grafana/grafana/pkg/build/syncutil"
)
func BuildInternalPlugins(c *cli.Context) error {
cfg := config.Config{
NumWorkers: c.Int("jobs"),
}
const grafanaDir = "."
metadata, err := config.GenerateMetadata(c)
if err != nil {
return err
}
buildConfig, err := config.GetBuildConfig(metadata.ReleaseMode.Mode)
if err != nil {
return err
}
log.Println("Building internal Grafana plug-ins...")
ctx := context.Background()
p := syncutil.NewWorkerPool(cfg.NumWorkers)
defer p.Close()
var g *errutil.Group
g, ctx = errutil.GroupWithContext(ctx)
if err := plugins.Build(ctx, grafanaDir, p, g, buildConfig); err != nil {
return cli.Exit(err.Error(), 1)
}
if err := g.Wait(); err != nil {
return cli.Exit(err.Error(), 1)
}
if err := plugins.Download(ctx, grafanaDir, p); err != nil {
return cli.Exit(err.Error(), 1)
}
log.Println("Successfully built Grafana plug-ins!")
return nil
}

View File

@@ -1,133 +0,0 @@
package main
import (
"fmt"
"log"
"os"
"strconv"
"github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/build/env"
"github.com/grafana/grafana/pkg/build/git"
)
// checkOpts are options used to create a new GitHub check for the enterprise downstream test.
type checkOpts struct {
SHA string
URL string
Branch string
PR int
}
func getCheckOpts(args []string) (*checkOpts, error) {
branch, ok := env.Lookup("DRONE_SOURCE_BRANCH", args)
if !ok {
return nil, cli.Exit("Unable to retrieve build source branch", 1)
}
var (
rgx = git.PRCheckRegexp()
matches = rgx.FindStringSubmatch(branch)
)
sha, ok := env.Lookup("SOURCE_COMMIT", args)
if !ok {
if matches == nil || len(matches) <= 1 {
return nil, cli.Exit("Unable to retrieve source commit", 1)
}
sha = matches[2]
}
url, ok := env.Lookup("DRONE_BUILD_LINK", args)
if !ok {
return nil, cli.Exit(`missing environment variable "DRONE_BUILD_LINK"`, 1)
}
prStr, ok := env.Lookup("OSS_PULL_REQUEST", args)
if !ok {
if matches == nil || len(matches) <= 1 {
return nil, cli.Exit("Unable to retrieve PR number", 1)
}
prStr = matches[1]
}
pr, err := strconv.Atoi(prStr)
if err != nil {
return nil, err
}
return &checkOpts{
Branch: branch,
PR: pr,
SHA: sha,
URL: url,
}, nil
}
// EnterpriseCheckBegin creates the GitHub check and signals the beginning of the downstream build / test process
func EnterpriseCheckBegin(c *cli.Context) error {
var (
ctx = c.Context
client = git.NewGitHubClient(ctx, c.String("github-token"))
)
opts, err := getCheckOpts(os.Environ())
if err != nil {
return err
}
if _, err = git.CreateEnterpriseStatus(ctx, client.Repositories, opts.SHA, opts.URL, "pending"); err != nil {
return err
}
return nil
}
func EnterpriseCheckSuccess(c *cli.Context) error {
return completeEnterpriseCheck(c, true)
}
func EnterpriseCheckFail(c *cli.Context) error {
return completeEnterpriseCheck(c, false)
}
func completeEnterpriseCheck(c *cli.Context, success bool) error {
var (
ctx = c.Context
client = git.NewGitHubClient(ctx, c.String("github-token"))
)
// Update the pull request labels
opts, err := getCheckOpts(os.Environ())
if err != nil {
return err
}
status := "failure"
if success {
status = "success"
}
// Update the GitHub check...
if _, err := git.CreateEnterpriseStatus(ctx, client.Repositories, opts.SHA, opts.URL, status); err != nil {
return err
}
// Delete branch if needed
log.Printf("Checking branch '%s' against '%s'", git.PRCheckRegexp().String(), opts.Branch)
if git.PRCheckRegexp().MatchString(opts.Branch) {
log.Println("Deleting branch", opts.Branch)
if err := git.DeleteEnterpriseBranch(ctx, client.Git, opts.Branch); err != nil {
return fmt.Errorf("error deleting enterprise branch: %w", err)
}
}
label := "enterprise-failed"
if success {
label = "enterprise-ok"
}
return git.AddLabelToPR(ctx, client.Issues, opts.PR, label)
}

View File

@@ -1,69 +0,0 @@
package main
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestGetCheckOpts(t *testing.T) {
t.Run("it should return the checkOpts if the correct environment variables are set", func(t *testing.T) {
args := []string{
"SOURCE_COMMIT=1234",
"DRONE_SOURCE_BRANCH=test",
"DRONE_BUILD_LINK=http://example.com",
"OSS_PULL_REQUEST=1",
}
opts, err := getCheckOpts(args)
require.NoError(t, err)
require.Equal(t, opts.SHA, "1234")
require.Equal(t, opts.URL, "http://example.com")
})
t.Run("it should return an error if SOURCE_COMMIT is not set", func(t *testing.T) {
args := []string{
"DRONE_BUILD_LINK=http://example.com",
"DRONE_SOURCE_BRANCH=test",
"DRONE_BUILD_LINK=http://example.com",
"OSS_PULL_REQUEST=1",
}
opts, err := getCheckOpts(args)
require.Nil(t, opts)
require.Error(t, err)
})
t.Run("it should return an error if DRONE_BUILD_LINK is not set", func(t *testing.T) {
args := []string{
"SOURCE_COMMIT=1234",
"DRONE_SOURCE_BRANCH=test",
"OSS_PULL_REQUEST=1",
}
opts, err := getCheckOpts(args)
require.Nil(t, opts)
require.Error(t, err)
})
t.Run("it should return an error if OSS_PULL_REQUEST is not set", func(t *testing.T) {
args := []string{
"SOURCE_COMMIT=1234",
"DRONE_SOURCE_BRANCH=test",
"DRONE_BUILD_LINK=http://example.com",
}
opts, err := getCheckOpts(args)
require.Nil(t, opts)
require.Error(t, err)
})
t.Run("it should return an error if OSS_PULL_REQUEST is not an integer", func(t *testing.T) {
args := []string{
"SOURCE_COMMIT=1234",
"DRONE_SOURCE_BRANCH=test",
"DRONE_BUILD_LINK=http://example.com",
"OSS_PULL_REQUEST=http://example.com",
}
opts, err := getCheckOpts(args)
require.Nil(t, opts)
require.Error(t, err)
})
}

View File

@@ -1,32 +0,0 @@
package main
import (
"os"
"path/filepath"
"github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/build/config"
)
func ExportVersion(c *cli.Context) error {
metadata, err := config.GenerateMetadata(c)
if err != nil {
return err
}
const distDir = "dist"
if err := os.RemoveAll(distDir); err != nil {
return err
}
if err := os.Mkdir(distDir, 0750); err != nil {
return err
}
// nolint:gosec
if err := os.WriteFile(filepath.Join(distDir, "grafana.version"), []byte(metadata.GrafanaVersion), 0664); err != nil {
return err
}
return nil
}

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"log"
"os/exec"
"strings"
"path/filepath"
"github.com/urfave/cli/v2"
@@ -18,6 +18,25 @@ const (
ubuntu = "ubuntu"
)
// GetImageFiles returns the list of image (.img, but should be .tar because they are tar archives) files that are
// created in the 'tag' process and stored in the prerelease bucket, waiting to be released.
func GetImageFiles(grafana string, version string, architectures []config.Architecture) []string {
bases := []string{alpine, ubuntu}
images := []string{}
for _, base := range bases {
for _, arch := range architectures {
image := fmt.Sprintf("%s-%s-%s.img", grafana, version, arch)
if base == "ubuntu" {
image = fmt.Sprintf("%s-%s-ubuntu-%s.img", grafana, version, arch)
}
images = append(images, image)
}
}
return images
}
func FetchImages(c *cli.Context) error {
if c.NArg() > 0 {
if err := cli.ShowSubcommandHelp(c); err != nil {
@@ -44,74 +63,65 @@ func FetchImages(c *cli.Context) error {
Tag: metadata.GrafanaVersion,
}
edition := fmt.Sprintf("-%s", cfg.Edition)
grafana := "grafana"
if cfg.Edition == "enterprise" {
grafana = "grafana-enterprise"
}
if cfg.Edition == "enterprise2" {
grafana = "grafana-enterprise2"
}
if cfg.Edition == "grafana" || cfg.Edition == "oss" {
grafana = "grafana-oss"
}
err = gcloud.ActivateServiceAccount()
if err != nil {
baseURL := fmt.Sprintf("gs://%s/%s/", cfg.Bucket, cfg.Tag)
images := GetImageFiles(grafana, cfg.Tag, cfg.Archs)
log.Printf("Fetching images [%v]", images)
if err := gcloud.ActivateServiceAccount(); err != nil {
return err
}
var basesStr []string
for _, base := range cfg.Distribution {
switch base {
case alpine:
basesStr = append(basesStr, "")
case ubuntu:
basesStr = append(basesStr, "-ubuntu")
default:
return fmt.Errorf("unrecognized base %q", base)
}
}
err = downloadFromGCS(cfg, basesStr, edition)
if err != nil {
if err := DownloadImages(baseURL, images, "."); err != nil {
return err
}
err = loadImages(cfg, basesStr, edition)
if err != nil {
if err := LoadImages(images, "."); err != nil {
return err
}
return nil
}
func loadImages(cfg docker.Config, basesStr []string, edition string) error {
log.Println("Loading fetched image files to local docker registry...")
log.Printf("Number of images to be loaded: %d\n", len(basesStr)*len(cfg.Archs))
for _, base := range basesStr {
for _, arch := range cfg.Archs {
imageFilename := fmt.Sprintf("grafana%s-%s%s-%s.img", edition, cfg.Tag, base, arch)
log.Printf("image file name: %s\n", imageFilename)
//nolint:gosec
cmd := exec.Command("docker", "load", "-i", imageFilename)
cmd.Dir = "."
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("out: %s\n", out)
return fmt.Errorf("error loading image: %q", err)
}
log.Printf("Successfully loaded %s!\n %s\n", fmt.Sprintf("grafana%s-%s%s-%s", edition, cfg.Tag, base, arch), out)
// LoadImages uses the `docker load -i` command to load the image tar file into the docker daemon so that it can be
// tagged and pushed.
func LoadImages(images []string, source string) error {
p := filepath.Clean(source)
for _, image := range images {
image := filepath.Join(p, image)
log.Println("Loading image", image)
//nolint:gosec
cmd := exec.Command("docker", "load", "-i", image)
cmd.Dir = "."
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("out: %s\n", out)
return fmt.Errorf("error loading image: %q", err)
}
log.Println("Loaded image", image)
}
log.Println("Images successfully loaded!")
return nil
}
func downloadFromGCS(cfg docker.Config, basesStr []string, edition string) error {
log.Printf("Downloading Docker images from GCS bucket: %s\n", cfg.Bucket)
for _, base := range basesStr {
for _, arch := range cfg.Archs {
src := fmt.Sprintf("gs://%s/%s/grafana%s-%s%s-%s.img", cfg.Bucket, cfg.Tag, edition, cfg.Tag, base, arch)
args := strings.Split(fmt.Sprintf("-m cp -r %s .", src), " ")
//nolint:gosec
cmd := exec.Command("gsutil", args...)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("failed to download: %w\n%s", err, out)
}
func DownloadImages(baseURL string, images []string, destination string) error {
for _, image := range images {
p := baseURL + image
log.Println("Downloading image", p)
//nolint:gosec
cmd := exec.Command("gsutil", "-m", "cp", "-r", p, destination)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("failed to download: %w\n%s", err, out)
}
}
log.Printf("Successfully fetched image files from %s bucket!\n", cfg.Bucket)
return nil
}

View File

@@ -0,0 +1,48 @@
package main
import (
"testing"
"github.com/grafana/grafana/pkg/build/config"
"github.com/stretchr/testify/require"
)
func TestGetImageFiles(t *testing.T) {
var (
architectures = []config.Architecture{
config.ArchAMD64,
config.ArchARM64,
config.ArchARMv7,
}
)
t.Run("1.2.3", func(t *testing.T) {
expect := []string{
"grafana-oss-1.2.3-amd64.img",
"grafana-oss-1.2.3-arm64.img",
"grafana-oss-1.2.3-armv7.img",
"grafana-oss-1.2.3-ubuntu-amd64.img",
"grafana-oss-1.2.3-ubuntu-arm64.img",
"grafana-oss-1.2.3-ubuntu-armv7.img",
}
res := GetImageFiles("grafana-oss", "1.2.3", architectures)
require.Equal(t, expect, res)
})
t.Run("1.2.3+example-01", func(t *testing.T) {
expect := []string{
"grafana-oss-1.2.3+example-01-amd64.img",
"grafana-oss-1.2.3+example-01-arm64.img",
"grafana-oss-1.2.3+example-01-armv7.img",
"grafana-oss-1.2.3+example-01-ubuntu-amd64.img",
"grafana-oss-1.2.3+example-01-ubuntu-arm64.img",
"grafana-oss-1.2.3+example-01-ubuntu-armv7.img",
}
res := GetImageFiles("grafana-oss", "1.2.3+example-01", architectures)
require.Equal(t, expect, res)
})
}

View File

@@ -16,37 +16,15 @@ var (
Usage: "The edition of Grafana to build (oss or enterprise)",
Value: "oss",
}
variantsFlag = cli.StringFlag{
Name: "variants",
Usage: "Comma-separated list of variants to build",
}
triesFlag = cli.IntFlag{
Name: "tries",
Usage: "Specify number of tries before failing",
Value: 1,
}
noInstallDepsFlag = cli.BoolFlag{
Name: "no-install-deps",
Usage: "Don't install dependencies",
}
signingAdminFlag = cli.BoolFlag{
Name: "signing-admin",
Usage: "Use manifest signing admin API endpoint?",
}
signFlag = cli.BoolFlag{
Name: "sign",
Usage: "Enable plug-in signing (you must set GRAFANA_API_KEY)",
}
dryRunFlag = cli.BoolFlag{
Name: "dry-run",
Usage: "Only simulate actions",
}
gitHubTokenFlag = cli.StringFlag{
Name: "github-token",
Value: "",
EnvVars: []string{"GITHUB_TOKEN"},
Usage: "GitHub token",
}
tagFlag = cli.StringFlag{
Name: "tag",
Usage: "Grafana version tag",

View File

@@ -19,6 +19,7 @@ import (
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/gcloud"
"github.com/grafana/grafana/pkg/build/gcloud/storage"
"github.com/grafana/grafana/pkg/build/gcom"
"github.com/grafana/grafana/pkg/build/packaging"
)
@@ -125,6 +126,51 @@ func getReleaseURLs() (string, string, error) {
return pconf.Grafana.WhatsNewURL, pconf.Grafana.ReleaseNotesURL, nil
}
func Builds(baseURL *url.URL, grafana, version string, packages []packaging.BuildArtifact) ([]GCOMPackage, error) {
builds := make([]GCOMPackage, len(packages))
for i, v := range packages {
var (
os = v.Distro
arch = v.Arch
)
if v.Distro == "windows" {
os = "win"
if v.Ext == "msi" {
os = "win-installer"
}
}
if v.Distro == "rhel" {
if arch == "aarch64" {
arch = "arm64"
}
if arch == "x86_64" {
arch = "amd64"
}
}
if v.Distro == "deb" {
if arch == "armhf" {
arch = "armv7"
if v.RaspberryPi {
log.Println(v.Distro, arch, "raspberrypi == true")
arch = "armv6"
}
}
}
u := gcom.GetURL(baseURL, version, grafana, v.Distro, v.Arch, v.Ext, v.Musl, v.RaspberryPi)
builds[i] = GCOMPackage{
OS: os,
URL: u.String(),
Arch: arch,
}
}
return builds, nil
}
// publishPackages publishes packages to grafana.com.
func publishPackages(cfg packaging.PublishConfig) error {
log.Printf("Publishing Grafana packages, version %s, %s edition, %s mode, dryRun: %v, simulating: %v...\n",
@@ -133,14 +179,17 @@ func publishPackages(cfg packaging.PublishConfig) error {
versionStr := fmt.Sprintf("v%s", cfg.Version)
log.Printf("Creating release %s at grafana.com...\n", versionStr)
var sfx string
var pth string
var (
pth string
grafana = "grafana"
)
switch cfg.Edition {
case config.EditionOSS:
pth = "oss"
case config.EditionEnterprise:
grafana = "grafana-enterprise"
pth = "enterprise"
sfx = packaging.EnterpriseSfx
default:
return fmt.Errorf("unrecognized edition %q", cfg.Edition)
}
@@ -152,28 +201,19 @@ func publishPackages(cfg packaging.PublishConfig) error {
pth = path.Join(pth, packaging.ReleaseFolder)
}
product := fmt.Sprintf("grafana%s", sfx)
pth = path.Join(pth, product)
baseArchiveURL := fmt.Sprintf("https://dl.grafana.com/%s", pth)
builds := make([]buildRepr, len(packaging.ArtifactConfigs))
for i, ba := range packaging.ArtifactConfigs {
u := ba.GetURL(baseArchiveURL, cfg)
sha256, err := getSHA256(u)
if err != nil {
return err
}
builds[i] = buildRepr{
OS: ba.Os,
URL: u,
SHA256: string(sha256),
Arch: ba.Arch,
}
pth = path.Join(pth)
baseArchiveURL := &url.URL{
Scheme: "https",
Host: "dl.grafana.com",
Path: pth,
}
r := releaseRepr{
builds, err := Builds(baseArchiveURL, grafana, cfg.Version, packaging.ArtifactConfigs)
if err != nil {
return err
}
r := Release{
Version: cfg.Version,
ReleaseDate: time.Now().UTC(),
Builds: builds,
@@ -195,6 +235,15 @@ func publishPackages(cfg packaging.PublishConfig) error {
return err
}
for i, v := range r.Builds {
sha, err := getSHA256(v.URL)
if err != nil {
return err
}
r.Builds[i].SHA256 = string(sha)
}
for _, b := range r.Builds {
if err := postRequest(cfg, fmt.Sprintf("versions/%s/packages", cfg.Version), b,
fmt.Sprintf("create build %s %s", b.OS, b.Arch)); err != nil {
@@ -211,6 +260,7 @@ func publishPackages(cfg packaging.PublishConfig) error {
func getSHA256(u string) ([]byte, error) {
shaURL := fmt.Sprintf("%s.sha256", u)
// nolint:gosec
resp, err := http.Get(shaURL)
if err != nil {
@@ -232,7 +282,7 @@ func getSHA256(u string) ([]byte, error) {
return sha256, nil
}
func postRequest(cfg packaging.PublishConfig, pth string, obj any, descr string) error {
func postRequest(cfg packaging.PublishConfig, pth string, body any, descr string) error {
var sfx string
switch cfg.Edition {
case config.EditionOSS:
@@ -243,7 +293,7 @@ func postRequest(cfg packaging.PublishConfig, pth string, obj any, descr string)
}
product := fmt.Sprintf("grafana%s", sfx)
jsonB, err := json.Marshal(obj)
jsonB, err := json.Marshal(body)
if err != nil {
return fmt.Errorf("failed to JSON encode release: %w", err)
}
@@ -303,20 +353,20 @@ func constructURL(product string, pth string) (string, error) {
return u.String(), err
}
type buildRepr struct {
type GCOMPackage struct {
OS string `json:"os"`
URL string `json:"url"`
SHA256 string `json:"sha256"`
Arch string `json:"arch"`
}
type releaseRepr struct {
Version string `json:"version"`
ReleaseDate time.Time `json:"releaseDate"`
Stable bool `json:"stable"`
Beta bool `json:"beta"`
Nightly bool `json:"nightly"`
WhatsNewURL string `json:"whatsNewUrl"`
ReleaseNotesURL string `json:"releaseNotesUrl"`
Builds []buildRepr `json:"-"`
type Release struct {
Version string `json:"version"`
ReleaseDate time.Time `json:"releaseDate"`
Stable bool `json:"stable"`
Beta bool `json:"beta"`
Nightly bool `json:"nightly"`
WhatsNewURL string `json:"whatsNewUrl"`
ReleaseNotesURL string `json:"releaseNotesUrl"`
Builds []GCOMPackage `json:"-"`
}

View File

@@ -1,7 +1,14 @@
package main
import (
"fmt"
"net/url"
"path"
"testing"
"github.com/grafana/grafana/pkg/build/packaging"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_constructURL(t *testing.T) {
@@ -33,3 +40,221 @@ func Test_constructURL(t *testing.T) {
})
}
}
func TestBuilds(t *testing.T) {
baseURL := &url.URL{
Scheme: "https",
Host: "dl.example.com",
Path: path.Join("oss", "release"),
}
version := "1.2.3"
grafana := "grafana"
packages := []packaging.BuildArtifact{
{
Distro: "deb",
Arch: "arm64",
Ext: "deb",
},
{
Distro: "rhel",
Arch: "aarch64",
Ext: "rpm",
},
{
Distro: "linux",
Arch: "arm64",
Ext: "tar.gz",
},
{
Distro: "deb",
Arch: "armhf",
Ext: "deb",
RaspberryPi: true,
},
{
Distro: "deb",
Arch: "armhf",
Ext: "deb",
},
{
Distro: "linux",
Arch: "armv7",
Ext: "tar.gz",
},
{
Distro: "windows",
Arch: "amd64",
Ext: "zip",
},
{
Distro: "windows",
Arch: "amd64",
Ext: "msi",
},
}
expect := []GCOMPackage{
{
URL: "https://dl.example.com/oss/release/grafana_1.2.3_arm64.deb",
OS: "deb",
Arch: "arm64",
},
{
URL: "https://dl.example.com/oss/release/grafana-1.2.3-1.aarch64.rpm",
OS: "rhel",
Arch: "arm64",
},
{
URL: "https://dl.example.com/oss/release/grafana-1.2.3.linux-arm64.tar.gz",
OS: "linux",
Arch: "arm64",
},
{
URL: "https://dl.example.com/oss/release/grafana-rpi_1.2.3_armhf.deb",
OS: "deb",
Arch: "armv6",
},
{
URL: "https://dl.example.com/oss/release/grafana_1.2.3_armhf.deb",
OS: "deb",
Arch: "armv7",
},
{
URL: "https://dl.example.com/oss/release/grafana-1.2.3.linux-armv7.tar.gz",
OS: "linux",
Arch: "armv7",
},
{
URL: "https://dl.example.com/oss/release/grafana-1.2.3.windows-amd64.zip",
OS: "win",
Arch: "amd64",
},
{
URL: "https://dl.example.com/oss/release/grafana-1.2.3.windows-amd64.msi",
OS: "win-installer",
Arch: "amd64",
},
}
builds, err := Builds(baseURL, grafana, version, packages)
require.NoError(t, err)
require.Equal(t, len(expect), len(builds))
for i := range builds {
t.Run(fmt.Sprintf("[%d/%d] %s", i+1, len(builds), expect[i].URL), func(t *testing.T) {
assert.Equal(t, expect[i].URL, builds[i].URL)
assert.Equal(t, expect[i].OS, builds[i].OS)
assert.Equal(t, expect[i].Arch, builds[i].Arch)
})
}
}
// TestBuildsWithPlus checks URL generation for a version carrying "+" build
// metadata ("1.2.3+example-01"). Per the expectations below, deb and rpm
// package URLs rewrite "+example-01" to "+example~01", while tar.gz, zip and
// msi URLs keep the version verbatim.
func TestBuildsWithPlus(t *testing.T) {
	baseURL := &url.URL{
		Scheme: "https",
		Host:   "dl.example.com",
		Path:   path.Join("oss", "release"),
	}

	// Each case pairs an input artifact with the GCOM package expected for it.
	cases := []struct {
		artifact packaging.BuildArtifact
		expect   GCOMPackage
	}{
		{
			artifact: packaging.BuildArtifact{Distro: "deb", Arch: "arm64", Ext: "deb"},
			expect:   GCOMPackage{URL: "https://dl.example.com/oss/release/grafana_1.2.3+example~01_arm64.deb", OS: "deb", Arch: "arm64"},
		},
		{
			artifact: packaging.BuildArtifact{Distro: "rhel", Arch: "aarch64", Ext: "rpm"},
			expect:   GCOMPackage{URL: "https://dl.example.com/oss/release/grafana-1.2.3+example~01-1.aarch64.rpm", OS: "rhel", Arch: "arm64"},
		},
		{
			artifact: packaging.BuildArtifact{Distro: "linux", Arch: "arm64", Ext: "tar.gz"},
			expect:   GCOMPackage{URL: "https://dl.example.com/oss/release/grafana-1.2.3+example-01.linux-arm64.tar.gz", OS: "linux", Arch: "arm64"},
		},
		{
			// Raspberry Pi builds get a "-rpi" package name and report armv6.
			artifact: packaging.BuildArtifact{Distro: "deb", Arch: "armhf", Ext: "deb", RaspberryPi: true},
			expect:   GCOMPackage{URL: "https://dl.example.com/oss/release/grafana-rpi_1.2.3+example~01_armhf.deb", OS: "deb", Arch: "armv6"},
		},
		{
			artifact: packaging.BuildArtifact{Distro: "deb", Arch: "armhf", Ext: "deb"},
			expect:   GCOMPackage{URL: "https://dl.example.com/oss/release/grafana_1.2.3+example~01_armhf.deb", OS: "deb", Arch: "armv7"},
		},
		{
			artifact: packaging.BuildArtifact{Distro: "linux", Arch: "armv7", Ext: "tar.gz"},
			expect:   GCOMPackage{URL: "https://dl.example.com/oss/release/grafana-1.2.3+example-01.linux-armv7.tar.gz", OS: "linux", Arch: "armv7"},
		},
		{
			artifact: packaging.BuildArtifact{Distro: "windows", Arch: "amd64", Ext: "zip"},
			expect:   GCOMPackage{URL: "https://dl.example.com/oss/release/grafana-1.2.3+example-01.windows-amd64.zip", OS: "win", Arch: "amd64"},
		},
		{
			artifact: packaging.BuildArtifact{Distro: "windows", Arch: "amd64", Ext: "msi"},
			expect:   GCOMPackage{URL: "https://dl.example.com/oss/release/grafana-1.2.3+example-01.windows-amd64.msi", OS: "win-installer", Arch: "amd64"},
		},
	}

	artifacts := make([]packaging.BuildArtifact, len(cases))
	for i, tc := range cases {
		artifacts[i] = tc.artifact
	}

	builds, err := Builds(baseURL, "grafana", "1.2.3+example-01", artifacts)
	require.NoError(t, err)
	require.Equal(t, len(cases), len(builds))

	for i, tc := range cases {
		t.Run(fmt.Sprintf("[%d/%d] %s", i+1, len(builds), tc.expect.URL), func(t *testing.T) {
			assert.Equal(t, tc.expect.URL, builds[i].URL)
			assert.Equal(t, tc.expect.OS, builds[i].OS)
			assert.Equal(t, tc.expect.Arch, builds[i].Arch)
		})
	}
}

View File

@@ -3,11 +3,9 @@ package main
import (
"log"
"os"
"strings"
"github.com/grafana/grafana/pkg/build"
"github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/build/docker"
)
var additionalCommands []*cli.Command = make([]*cli.Command, 0, 5)
@@ -21,28 +19,8 @@ func main() {
app := cli.NewApp()
app.Commands = cli.Commands{
{
Name: "build-backend",
Usage: "Build one or more variants of back-end binaries",
ArgsUsage: "[version]",
Action: MaxArgCountWrapper(1, BuildBackend),
Flags: []cli.Flag{
&jobsFlag,
&variantsFlag,
&editionFlag,
&buildIDFlag,
},
},
{
Name: "build-frontend-packages",
Usage: "Build front-end packages",
ArgsUsage: "[version]",
Action: BuildFrontendPackages,
Flags: []cli.Flag{
&jobsFlag,
&editionFlag,
&buildIDFlag,
&noInstallDepsFlag,
},
Name: "build",
Action: build.RunCmdCLI,
},
{
Name: "e2e-tests",
@@ -71,44 +49,11 @@ func main() {
},
},
},
{
Name: "build-frontend",
Usage: "Build front-end artifacts",
ArgsUsage: "[version]",
Action: MaxArgCountWrapper(1, BuildFrontend),
Flags: []cli.Flag{
&jobsFlag,
&editionFlag,
&buildIDFlag,
},
},
{
Name: "whatsnew-checker",
Usage: "Checks whatsNewUrl in package.json for differences between the tag and the docs version",
Action: WhatsNewChecker,
},
{
Name: "build-docker",
Usage: "Build Grafana Docker images",
Action: MaxArgCountWrapper(1, BuildDocker),
Flags: []cli.Flag{
&jobsFlag,
&editionFlag,
&cli.BoolFlag{
Name: "ubuntu",
Usage: "Use Ubuntu base image",
},
&cli.BoolFlag{
Name: "shouldSave",
Usage: "Should save docker image to tarball",
},
&cli.StringFlag{
Name: "archs",
Value: strings.Join(docker.AllArchs, ","),
Usage: "Comma separated architectures to build",
},
},
},
{
Name: "upload-cdn",
Usage: "Upload public/* to a cdn bucket",
@@ -117,23 +62,6 @@ func main() {
&editionFlag,
},
},
{
Name: "shellcheck",
Usage: "Run shellcheck on shell scripts",
Action: Shellcheck,
},
{
Name: "build-plugins",
Usage: "Build internal plug-ins",
Action: MaxArgCountWrapper(1, BuildInternalPlugins),
Flags: []cli.Flag{
&jobsFlag,
&editionFlag,
&signingAdminFlag,
&signFlag,
&noInstallDepsFlag,
},
},
{
Name: "publish-metrics",
Usage: "Publish a set of metrics from stdin",
@@ -145,30 +73,6 @@ func main() {
Usage: "Verify Drone configuration",
Action: VerifyDrone,
},
{
Name: "verify-starlark",
Usage: "Verify Starlark configuration",
ArgsUsage: "<workspace path>",
Action: VerifyStarlark,
},
{
Name: "export-version",
Usage: "Exports version in dist/grafana.version",
Action: ExportVersion,
},
{
Name: "package",
Usage: "Package one or more Grafana variants",
ArgsUsage: "[version]",
Action: MaxArgCountWrapper(1, Package),
Flags: []cli.Flag{
&jobsFlag,
&variantsFlag,
&editionFlag,
&buildIDFlag,
&signFlag,
},
},
{
Name: "store-storybook",
Usage: "Stores storybook to GCS buckets",
@@ -279,18 +183,6 @@ func main() {
&editionFlag,
},
},
{
Name: "publish-enterprise2",
Usage: "Handle Grafana Enterprise2 Docker images",
ArgsUsage: "[version]",
Action: Enterprise2,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "dockerhub-repo",
Usage: "DockerHub repo to push images",
},
},
},
},
},
{
@@ -399,36 +291,6 @@ func main() {
},
},
},
{
Name: "enterprise-check",
Usage: "Commands for testing against Grafana Enterprise",
Subcommands: cli.Commands{
{
Name: "begin",
Usage: "Creates the GitHub check in a pull request and begins the tests",
Action: EnterpriseCheckBegin,
Flags: []cli.Flag{
&gitHubTokenFlag,
},
},
{
Name: "success",
Usage: "Updates the GitHub check in a pull request to show a successful build and updates the pull request labels",
Action: EnterpriseCheckSuccess,
Flags: []cli.Flag{
&gitHubTokenFlag,
},
},
{
Name: "fail",
Usage: "Updates the GitHub check in a pull request to show a failed build and updates the pull request labels",
Action: EnterpriseCheckFail,
Flags: []cli.Flag{
&gitHubTokenFlag,
},
},
},
},
}
app.Commands = append(app.Commands, additionalCommands...)

View File

@@ -2,6 +2,7 @@ package main
import (
"fmt"
"log"
"os"
"strings"
@@ -23,6 +24,11 @@ func NpmRetrieveAction(c *cli.Context) error {
return fmt.Errorf("no tag version specified, exitting")
}
if strings.Contains(tag, "security") {
log.Printf("skipping npm publish because version '%s' has 'security'", tag)
return nil
}
prereleaseBucket := strings.TrimSpace(os.Getenv("PRERELEASE_BUCKET"))
if prereleaseBucket == "" {
return cli.Exit("the environment variable PRERELEASE_BUCKET must be set", 1)
@@ -48,6 +54,11 @@ func NpmStoreAction(c *cli.Context) error {
return fmt.Errorf("no tag version specified, exiting")
}
if strings.Contains(tag, "security") {
log.Printf("skipping npm publish because version '%s' has 'security'", tag)
return nil
}
prereleaseBucket := strings.TrimSpace(os.Getenv("PRERELEASE_BUCKET"))
if prereleaseBucket == "" {
return cli.Exit("the environment variable PRERELEASE_BUCKET must be set", 1)
@@ -73,6 +84,11 @@ func NpmReleaseAction(c *cli.Context) error {
return fmt.Errorf("no tag version specified, exitting")
}
if strings.Contains(tag, "security") {
log.Printf("skipping npm publish because version '%s' has 'security'", tag)
return nil
}
err := npm.PublishNpmPackages(c.Context, tag)
if err != nil {
return err

View File

@@ -1,80 +0,0 @@
package main
import (
"context"
"log"
"strings"
"github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/gpg"
"github.com/grafana/grafana/pkg/build/packaging"
"github.com/grafana/grafana/pkg/build/syncutil"
)
// Package is the CLI action that builds distribution packages for Grafana.
// It resolves build metadata and release configuration from the CLI context,
// optionally imports GPG keys for package signing, and delegates the actual
// packaging to packaging.PackageGrafana on a worker pool.
func Package(c *cli.Context) error {
	meta, err := config.GenerateMetadata(c)
	if err != nil {
		return err
	}

	ed := config.Edition(c.String("edition"))

	relMode, err := meta.GetReleaseMode()
	if err != nil {
		return cli.Exit(err.Error(), 1)
	}

	modeCfg, err := config.GetBuildConfig(meta.ReleaseMode.Mode)
	if err != nil {
		return cli.Exit(err.Error(), 1)
	}

	cfg := config.Config{
		NumWorkers:   c.Int("jobs"),
		SignPackages: c.Bool("sign"),
	}

	// Variants come from the --variants flag when given, otherwise from the
	// release-mode configuration, falling back to every known variant.
	variantStrs := strings.Split(c.String("variants"), ",")
	var variants []config.Variant
	if c.String("variants") == "" {
		variants = modeCfg.Variants
	} else {
		for _, s := range variantStrs {
			if s != "" {
				variants = append(variants, config.Variant(s))
			}
		}
	}
	if len(variants) == 0 {
		variants = config.AllVariants
	}

	log.Printf("Packaging Grafana version %q, version mode %s, %s edition, variants %s",
		meta.GrafanaVersion, relMode.Mode, ed, strings.Join(variantStrs, ","))

	if cfg.SignPackages {
		// Load and import the GPG keys up front; the key files are removed
		// again when packaging finishes.
		if err := gpg.LoadGPGKeys(&cfg); err != nil {
			return cli.Exit(err, 1)
		}
		defer gpg.RemoveGPGFiles(cfg)
		if err := gpg.Import(cfg); err != nil {
			return cli.Exit(err, 1)
		}
	}

	pool := syncutil.NewWorkerPool(cfg.NumWorkers)
	defer pool.Close()

	ctx := context.Background()
	if err := packaging.PackageGrafana(ctx, meta.GrafanaVersion, ".", cfg, ed, variants, modeCfg.PluginSignature.Sign, pool); err != nil {
		return cli.Exit(err, 1)
	}

	log.Println("Successfully packaged Grafana!")
	return nil
}

View File

@@ -1,101 +0,0 @@
package main
import (
"fmt"
"log"
"os"
"os/exec"
"github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/docker"
"github.com/grafana/grafana/pkg/build/gcloud"
)
// Enterprise2 is the CLI action for publishing Grafana Enterprise2 Docker
// images: it activates the GCP service account, derives the image tag from the
// build metadata, pushes each per-architecture image to the repo given by the
// --dockerhub-repo flag, then creates and pushes one multi-arch manifest per
// distribution.
func Enterprise2(c *cli.Context) error {
	// The command takes no positional arguments — show usage and bail if any
	// were supplied.
	if c.NArg() > 0 {
		if err := cli.ShowSubcommandHelp(c); err != nil {
			return cli.Exit(err.Error(), 1)
		}
		return cli.Exit("", 1)
	}

	if err := gcloud.ActivateServiceAccount(); err != nil {
		return fmt.Errorf("couldn't activate service account, err: %w", err)
	}

	metadata, err := config.GenerateMetadata(c)
	if err != nil {
		return err
	}

	buildConfig, err := config.GetBuildConfig(metadata.ReleaseMode.Mode)
	if err != nil {
		return err
	}

	cfg := docker.Config{
		Archs:         buildConfig.Docker.Architectures,
		Distribution:  buildConfig.Docker.Distribution,
		DockerHubRepo: c.String("dockerhub-repo"),
		Tag:           metadata.GrafanaVersion,
	}

	err = dockerLoginEnterprise2()
	if err != nil {
		return err
	}

	// Translate distributions into the tag-suffix convention: alpine images
	// carry no suffix, ubuntu images get "-ubuntu".
	var distributionStr []string
	for _, distribution := range cfg.Distribution {
		switch distribution {
		case alpine:
			distributionStr = append(distributionStr, "")
		case ubuntu:
			distributionStr = append(distributionStr, "-ubuntu")
		default:
			return fmt.Errorf("unrecognized distribution %q", distribution)
		}
	}

	for _, distribution := range distributionStr {
		// Push every architecture-specific image first; the manifest created
		// below references them by name, so they must exist in the registry.
		var imageFileNames []string
		for _, arch := range cfg.Archs {
			imageFilename := fmt.Sprintf("%s:%s%s-%s", cfg.DockerHubRepo, cfg.Tag, distribution, arch)
			err := docker.PushImage(imageFilename)
			if err != nil {
				return err
			}
			imageFileNames = append(imageFileNames, imageFilename)
		}

		// Assemble the multi-arch manifest from the per-arch images.
		manifest := fmt.Sprintf("%s:%s%s", cfg.DockerHubRepo, cfg.Tag, distribution)
		args := []string{"manifest", "create", manifest}
		args = append(args, imageFileNames...)

		//nolint:gosec
		cmd := exec.Command("docker", args...)
		// "docker manifest" requires the experimental CLI feature flag.
		cmd.Env = append(os.Environ(), "DOCKER_CLI_EXPERIMENTAL=enabled")
		if output, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("failed to create Docker manifest: %w\n%s", err, output)
		}

		err = docker.PushManifest(manifest)
		if err != nil {
			return err
		}
	}

	return nil
}
// dockerLoginEnterprise2 configures the local Docker credentials used for
// pushing the Enterprise2 images by delegating to `gcloud auth
// configure-docker`. It returns an error carrying the command output if the
// configuration fails.
func dockerLoginEnterprise2() error {
	log.Println("Docker login...")

	cmd := exec.Command("gcloud", "auth", "configure-docker")
	if out, err := cmd.CombinedOutput(); err != nil {
		// Wrap the underlying error with %w so callers can use errors.Is/As
		// (the previous %q stringified it), and describe the actual command:
		// this configures Docker auth via gcloud, not a DockerHub login.
		return fmt.Errorf("error configuring Docker auth via gcloud: %w, output: %s", err, out)
	}

	log.Println("Successful login!")
	return nil
}

View File

@@ -1,42 +0,0 @@
package main
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/urfave/cli/v2"
)
// Shellcheck runs the shellcheck linter over every file ending in ".sh" found
// recursively under the scripts/ directory, returning an error carrying the
// linter output on any finding or execution failure.
func Shellcheck(c *cli.Context) error {
	log.Println("Running shellcheck...")

	fpaths := []string{}
	if err := filepath.Walk("scripts", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if strings.HasSuffix(path, ".sh") {
			fpaths = append(fpaths, path)
		}
		return nil
	}); err != nil {
		return fmt.Errorf("couldn't traverse scripts/: %w", err)
	}

	log.Printf("Running shellcheck on %s", strings.Join(fpaths, ","))
	// SC1071 and SC2162 are deliberately excluded (kept from the original
	// invocation — see the shellcheck wiki for what these codes cover).
	args := append([]string{"-e", "SC1071", "-e", "SC2162"}, fpaths...)
	//nolint:gosec
	cmd := exec.Command("shellcheck", args...)
	if output, err := cmd.CombinedOutput(); err != nil {
		// Wrap the exec error (%w) in addition to the linter output so callers
		// can distinguish "shellcheck not installed" from "lint findings".
		return fmt.Errorf("shellcheck failed: %w\n%s", err, output)
	}

	log.Println("Successfully ran shellcheck!")
	return nil
}

View File

@@ -7,6 +7,7 @@ import (
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"github.com/urfave/cli/v2"
@@ -14,9 +15,28 @@ import (
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/droneutil"
"github.com/grafana/grafana/pkg/build/gcloud"
"github.com/grafana/grafana/pkg/build/packaging"
)
// PackageRegexp returns a regexp for matching packages corresponding to a
// certain Grafana edition: OSS packages have no suffix, enterprise and
// enterprise2 packages carry "-enterprise"/"-enterprise2", and Raspberry Pi
// packages may carry an additional "-rpi" marker. Panics on an unrecognized
// edition (programmer error).
func PackageRegexp(edition config.Edition) *regexp.Regexp {
	var sfx string
	switch edition {
	case config.EditionOSS:
	case config.EditionEnterprise:
		sfx = "-enterprise"
	case config.EditionEnterprise2:
		sfx = "-enterprise2"
	default:
		panic(fmt.Sprintf("unrecognized edition %q", edition))
	}
	// MustCompile replaces the previous Compile+manual-panic pair: the pattern
	// is built from a fixed template, so a compile failure is a programmer
	// error and panicking is the idiomatic response.
	return regexp.MustCompile(fmt.Sprintf(`^grafana%s(?:-rpi)?[-_][^-_]+.*$`, sfx))
}
const releaseFolder = "release"
const mainFolder = "main"
const releaseBranchFolder = "prerelease"
@@ -181,7 +201,7 @@ func uploadPackages(cfg uploadConfig) error {
return fmt.Errorf("failed to list packages: %w", err)
}
fpaths := []string{}
rePkg := packaging.PackageRegexp(cfg.edition)
rePkg := PackageRegexp(cfg.edition)
for _, fpath := range matches {
fname := filepath.Base(fpath)
if strings.Contains(fname, "latest") || !rePkg.MatchString(fname) {

View File

@@ -1,142 +0,0 @@
package main
import (
"context"
"errors"
"fmt"
"io/fs"
"os/exec"
"path/filepath"
"strings"
"github.com/urfave/cli/v2"
)
// mapSlice applies f to every element of a and returns a new slice holding the
// results in the same order. The input slice is not modified.
func mapSlice[I any, O any](a []I, f func(I) O) []O {
	out := make([]O, 0, len(a))
	for _, elem := range a {
		out = append(out, f(elem))
	}
	return out
}
// VerifyStarlark is the CLI Action for verifying Starlark files in a workspace.
// It expects a single context argument which is the path to the workspace.
// The actual verification procedure can return multiple errors which are
// joined together to be one holistic error for the action.
func VerifyStarlark(c *cli.Context) error {
	if c.NArg() != 1 {
		var message string
		switch {
		case c.NArg() == 0:
			message = "ERROR: missing required argument <workspace path>"
		case c.NArg() > 1:
			message = "ERROR: too many arguments"
		}
		if err := cli.ShowSubcommandHelp(c); err != nil {
			return err
		}
		return cli.Exit(message, 1)
	}

	workspace := c.Args().Get(0)
	verificationErrs, executionErr := verifyStarlark(c.Context, workspace, buildifierLintCommand)
	if executionErr != nil {
		return executionErr
	}
	if len(verificationErrs) == 0 {
		return nil
	}

	// Collapse the per-file verification errors into one message per line.
	messages := make([]string, len(verificationErrs))
	for i, verr := range verificationErrs {
		messages[i] = verr.Error()
	}
	noun := "file"
	if len(verificationErrs) > 1 {
		noun = "files"
	}
	return fmt.Errorf("verification failed for %d %s:\n%s",
		len(verificationErrs),
		noun,
		strings.Join(messages, "\n"))
}
// commandFunc produces, for a given file path, the command name and argument
// list to execute when verifying that file.
type commandFunc = func(path string) (command string, args []string)

// buildifierLintCommand builds the buildifier invocation used to lint the
// Starlark file at path (lint warnings enabled, check-only mode).
func buildifierLintCommand(path string) (string, []string) {
	args := []string{"-lint", "warn", "-mode", "check", path}
	return "buildifier", args
}
// verifyStarlark walks all directories starting at provided workspace path and
// verifies any Starlark files it finds.
// Starlark files are assumed to end with the .star extension.
// The verification relies on linting provided by the 'buildifier' binary which
// must be in the PATH.
// A slice of verification errors are returned, one for each file that failed verification.
// If any execution of the `buildifier` command fails, this is returned separately.
// commandFn is executed on every Starlark file to determine the command and arguments to be executed.
// The caller is trusted and it is the callers responsibility to ensure that the resulting command is safe to execute.
func verifyStarlark(ctx context.Context, workspace string, commandFn commandFunc) ([]error, error) {
	var verificationErrs []error

	// All errors from filepath.WalkDir are filtered by the fs.WalkDirFunc.
	// Lstat or ReadDir errors are reported as verificationErrors.
	// If any execution of the `buildifier` command fails or if the context is cancelled,
	// it is reported as an error and any verification of subsequent files is skipped.
	err := filepath.WalkDir(workspace, func(path string, d fs.DirEntry, err error) error {
		// Skip verification of the file or files within the directory if there is an error
		// returned by Lstat or ReadDir.
		if err != nil {
			verificationErrs = append(verificationErrs, err)
			return nil
		}

		if d.IsDir() {
			return nil
		}

		// Only .star files are verified; every other file is ignored.
		if filepath.Ext(path) == ".star" {
			command, args := commandFn(path)
			// The caller is trusted.
			//nolint:gosec
			cmd := exec.CommandContext(ctx, command, args...)
			cmd.Dir = workspace

			_, err = cmd.Output()
			if err == nil { // No error, early return.
				return nil
			}

			// The error returned from cmd.Output() is never wrapped.
			//nolint:errorlint
			if err, ok := err.(*exec.ExitError); ok {
				// Exit codes 1 and 4 describe problems with the file under
				// inspection (recorded as verification errors); 2, 3 and
				// anything else indicate the tool itself failed (returned as
				// an execution error, aborting the walk).
				switch err.ExitCode() {
				// Case comments are informed by the output of `buildifier --help`
				case 1: // syntax errors in input
					verificationErrs = append(verificationErrs, errors.New(string(err.Stderr)))
					return nil
				case 2: // usage errors: invoked incorrectly
					return fmt.Errorf("command %q: %s", cmd, err.Stderr)
				case 3: // unexpected runtime errors: file I/O problems or internal bugs
					return fmt.Errorf("command %q: %s", cmd, err.Stderr)
				case 4: // check mode failed (reformat is needed)
					verificationErrs = append(verificationErrs, errors.New(string(err.Stderr)))
					return nil
				default:
					return fmt.Errorf("command %q: %s", cmd, err.Stderr)
				}
			}

			// Error was not an exit error from the command.
			return fmt.Errorf("command %q: %v", cmd, err)
		}

		return nil
	})

	return verificationErrs, err
}

View File

@@ -1,137 +0,0 @@
//go:build requires_buildifier
package main
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// TestVerifyStarlark exercises verifyStarlark's two error channels: execution
// errors (bad tool invocation, cancelled context) and per-file verification
// errors (files buildifier flags). Requires the buildifier binary (see the
// requires_buildifier build tag).
func TestVerifyStarlark(t *testing.T) {
	t.Run("execution errors", func(t *testing.T) {
		t.Run("invalid usage", func(t *testing.T) {
			ctx := context.Background()
			workspace := t.TempDir()
			err := os.WriteFile(filepath.Join(workspace, "ignored.star"), []byte{}, os.ModePerm)
			if err != nil {
				// t.Fatal(err) instead of t.Fatalf(err.Error()): passing an
				// error string as a format string trips go vet's printf check
				// and misformats messages containing '%'.
				t.Fatal(err)
			}

			_, executionErr := verifyStarlark(ctx, workspace, func(string) (string, []string) { return "buildifier", []string{"--invalid"} })
			if executionErr == nil {
				t.Fatalf("Expected execution error but got none")
			}
		})

		t.Run("context cancellation", func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			workspace := t.TempDir()
			err := os.WriteFile(filepath.Join(workspace, "ignored.star"), []byte{}, os.ModePerm)
			if err != nil {
				t.Fatal(err)
			}
			err = os.WriteFile(filepath.Join(workspace, "other-ignored.star"), []byte{}, os.ModePerm)
			if err != nil {
				t.Fatal(err)
			}

			// Cancel before running so the first command execution fails.
			cancel()
			_, executionErr := verifyStarlark(ctx, workspace, buildifierLintCommand)
			if executionErr == nil {
				t.Fatalf("Expected execution error but got none")
			}
		})
	})

	t.Run("verification errors", func(t *testing.T) {
		t.Run("a single file with lint", func(t *testing.T) {
			ctx := context.Background()
			workspace := t.TempDir()
			invalidContent := []byte(`load("scripts/drone/other.star", "function")
function()`)
			err := os.WriteFile(filepath.Join(workspace, "has-lint.star"), invalidContent, os.ModePerm)
			if err != nil {
				t.Fatal(err)
			}

			verificationErrs, executionErr := verifyStarlark(ctx, workspace, buildifierLintCommand)
			if executionErr != nil {
				t.Fatalf("Unexpected execution error: %v", executionErr)
			}
			if len(verificationErrs) == 0 {
				t.Fatalf(`"has-lint.star" requires linting but the verifyStarlark function provided no linting error`)
			}
			if len(verificationErrs) > 1 {
				t.Fatalf(`verifyStarlark returned multiple errors for the "has-lint.star" file but only one was expected: %v`, verificationErrs)
			}
			if !strings.Contains(verificationErrs[0].Error(), "has-lint.star:1: module-docstring: The file has no module docstring.") {
				t.Fatalf(`"has-lint.star" is missing a module docstring but the verifyStarlark function linting error did not mention this, instead we got: %v`, verificationErrs[0])
			}
		})

		t.Run("no files with lint", func(t *testing.T) {
			ctx := context.Background()
			workspace := t.TempDir()
			content := []byte(`"""
This module does nothing.
"""
load("scripts/drone/other.star", "function")
function()
`)
			require.NoError(t, os.WriteFile(filepath.Join(workspace, "no-lint.star"), content, os.ModePerm))

			verificationErrs, executionErr := verifyStarlark(ctx, workspace, buildifierLintCommand)
			if executionErr != nil {
				t.Fatalf("Unexpected execution error: %v", executionErr)
			}
			if len(verificationErrs) != 0 {
				t.Log(`"no-lint.star" has no lint but the verifyStarlark function provided at least one error`)
				for _, err := range verificationErrs {
					t.Log(err)
				}
				t.FailNow()
			}
		})

		t.Run("multiple files with lint", func(t *testing.T) {
			ctx := context.Background()
			workspace := t.TempDir()
			invalidContent := []byte(`load("scripts/drone/other.star", "function")
function()`)
			require.NoError(t, os.WriteFile(filepath.Join(workspace, "has-lint.star"), invalidContent, os.ModePerm))
			require.NoError(t, os.WriteFile(filepath.Join(workspace, "has-lint2.star"), invalidContent, os.ModePerm))

			verificationErrs, executionErr := verifyStarlark(ctx, workspace, buildifierLintCommand)
			if executionErr != nil {
				t.Fatalf("Unexpected execution error: %v", executionErr)
			}
			if len(verificationErrs) == 0 {
				t.Fatalf(`Two files require linting but the verifyStarlark function provided no linting error`)
			}
			if len(verificationErrs) == 1 {
				t.Fatalf(`Two files require linting but the verifyStarlark function provided only one linting error: %v`, verificationErrs[0])
			}
			if len(verificationErrs) > 2 {
				t.Fatalf(`verifyStarlark returned more errors than expected: %v`, verificationErrs)
			}
			if !strings.Contains(verificationErrs[0].Error(), "has-lint.star:1: module-docstring: The file has no module docstring.") {
				t.Errorf(`"has-lint.star" is missing a module docstring but the verifyStarlark function linting error did not mention this, instead we got: %v`, verificationErrs[0])
			}
			// Fixed: report the error actually inspected (index 1, not 0) and
			// use Errorf for consistency with the sibling assertion above.
			if !strings.Contains(verificationErrs[1].Error(), "has-lint2.star:1: module-docstring: The file has no module docstring.") {
				t.Errorf(`"has-lint2.star" is missing a module docstring but the verifyStarlark function linting error did not mention this, instead we got: %v`, verificationErrs[1])
			}
		})
	})
}

View File

@@ -1,50 +0,0 @@
package compilers
import (
"fmt"
"os"
"os/exec"
"path/filepath"
)
// Paths or names of the cross-compiler binaries for each supported target
// platform. Bare names are expected on PATH.
// NOTE(review): Install below only unpacks the centos6 and osxcross archives;
// the other /tmp and /opt toolchains are presumably provided pre-installed by
// the build image — confirm before relying on them.
const (
	ArmV6        = "/opt/rpi-tools/arm-bcm2708/arm-linux-gnueabihf/bin/arm-linux-gnueabihf-gcc" // Raspberry Pi (glibc)
	Armv7        = "arm-linux-gnueabihf-gcc"                                                    // 32-bit ARM (glibc)
	Armv7Musl    = "/tmp/arm-linux-musleabihf-cross/bin/arm-linux-musleabihf-gcc"               // 32-bit ARM (musl)
	Arm64        = "aarch64-linux-gnu-gcc"                                                      // 64-bit ARM (glibc)
	Arm64Musl    = "/tmp/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc"                   // 64-bit ARM (musl)
	Osx64        = "/tmp/osxcross/target/bin/o64-clang"                                         // macOS via osxcross
	Win64        = "x86_64-w64-mingw32-gcc"                                                     // 64-bit Windows via MinGW
	LinuxX64     = "/tmp/x86_64-centos6-linux-gnu/bin/x86_64-centos6-linux-gnu-gcc"             // x86-64 Linux (glibc, CentOS 6 baseline)
	LinuxX64Musl = "/tmp/x86_64-linux-musl-cross/bin/x86_64-linux-musl-gcc"                     // x86-64 Linux (musl)
)
// Install unpacks the cross-compiler toolchain archives that are expected to
// already be present in the system temp directory, extracting them in place.
// It returns an error if an archive is missing or fails to extract.
//
// From the os.TempDir documentation: on Unix systems it returns $TMPDIR if
// non-empty, else /tmp. On Windows it uses GetTempPath, returning the first
// non-empty value from %TMP%, %TEMP%, %USERPROFILE%, or the Windows directory.
// On Plan 9 it returns /tmp.
func Install() error {
	tmp := os.TempDir()

	archives := []string{
		"x86_64-centos6-linux-gnu.tar.xz",
		"osxcross.tar.xz",
	}
	for _, name := range archives {
		// Fail fast with a clear error if the archive was never provisioned.
		if _, err := os.Stat(filepath.Join(tmp, name)); err != nil {
			return fmt.Errorf("stat error: %w", err)
		}

		// Ignore gosec G204 as this function is only used in the build process.
		//nolint:gosec
		cmd := exec.Command("tar", "xfJ", name)
		cmd.Dir = tmp
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("failed to unpack %q: %q, %w", name, out, err)
		}
	}

	return nil
}

View File

@@ -1,69 +0,0 @@
package config
import (
"context"
"fmt"
"log"
"strconv"
"time"
"github.com/grafana/grafana/pkg/build/executil"
)
// Revision describes a checked-out Grafana source tree as reported by git
// (see GrafanaRevision).
type Revision struct {
	Timestamp        int64  // commit timestamp in unix seconds (see GrafanaTimestamp)
	SHA256           string // short commit hash of HEAD — despite the name, not a SHA-256 digest
	EnterpriseCommit string // short commit hash of the grafana-enterprise checkout; empty if not found
	Branch           string // abbreviated ref name of HEAD (current branch)
}
// GrafanaTimestamp returns the commit timestamp (unix seconds, via
// `git show -s --format=%ct`) of the checkout at dir.
//
// If git itself fails (e.g. dir is not a git checkout), it deliberately falls
// back to the current time and reports no error; an error is returned only
// when git succeeds but its output cannot be parsed as an integer.
func GrafanaTimestamp(ctx context.Context, dir string) (int64, error) {
	out, err := executil.OutputAt(ctx, dir, "git", "show", "-s", "--format=%ct")
	if err != nil {
		// Best-effort fallback: no git metadata means "now".
		return time.Now().Unix(), nil
	}

	stamp, err := strconv.ParseInt(out, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse output from git show: %q", out)
	}

	return stamp, nil
}
// GrafanaRevision uses git commands to get information about the checked out Grafana code located at 'grafanaDir'.
// This could maybe be a more generic "Describe" function in the "git" package.
func GrafanaRevision(ctx context.Context, grafanaDir string) (Revision, error) {
	stamp, err := GrafanaTimestamp(ctx, grafanaDir)
	if err != nil {
		return Revision{}, err
	}

	sha, err := executil.OutputAt(ctx, grafanaDir, "git", "rev-parse", "--short", "HEAD")
	if err != nil {
		return Revision{}, err
	}

	// Best-effort lookup of the matching grafana-enterprise commit: try the
	// sibling "../grafana-enterprise" checkout, then the parent directory,
	// then /tmp/grafana-enterprise. If all three fail the commit is left empty
	// and only a log line is emitted — the enterprise commit is optional
	// metadata, so the deliberate swallowing of err here is not a bug.
	enterpriseCommit, err := executil.OutputAt(ctx, grafanaDir, "git", "-C", "../grafana-enterprise", "rev-parse", "--short", "HEAD")
	if err != nil {
		enterpriseCommit, err = executil.OutputAt(ctx, grafanaDir, "git", "-C", "..", "rev-parse", "--short", "HEAD")
		if err != nil {
			enterpriseCommit, err = executil.OutputAt(ctx, grafanaDir, "git", "-C", "/tmp/grafana-enterprise", "rev-parse", "--short", "HEAD")
			if err != nil {
				log.Println("Could not get enterprise commit. Error:", err)
			}
		}
	}

	branch, err := executil.OutputAt(ctx, grafanaDir, "git", "rev-parse", "--abbrev-ref", "HEAD")
	if err != nil {
		return Revision{}, err
	}

	return Revision{
		SHA256:           sha,
		EnterpriseCommit: enterpriseCommit,
		Branch:           branch,
		Timestamp:        stamp,
	}, nil
}

View File

@@ -1,35 +0,0 @@
package cryptoutil
import (
"crypto/md5"
"fmt"
"io"
"log"
"os"
)
func MD5File(fpath string) error {
// Ignore gosec G304 as this function is only used in the build process.
//nolint:gosec
fd, err := os.Open(fpath)
if err != nil {
return err
}
defer func() {
if err := fd.Close(); err != nil {
log.Printf("error closing file at '%s': %s", fpath, err.Error())
}
}()
h := md5.New() // nolint:gosec
if _, err = io.Copy(h, fd); err != nil {
return err
}
// nolint:gosec
if err := os.WriteFile(fpath+".md5", []byte(fmt.Sprintf("%x\n", h.Sum(nil))), 0664); err != nil {
return err
}
return nil
}

View File

@@ -1,182 +0,0 @@
package docker
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/grafana/grafana/pkg/build/config"
)
// verifyArchive verifies the integrity of an archive file.
func verifyArchive(archive string) error {
log.Printf("Verifying checksum of %q", archive)
//nolint:gosec
shaB, err := os.ReadFile(archive + ".sha256")
if err != nil {
return err
}
exp := strings.TrimSpace(string(shaB))
//nolint:gosec
f, err := os.Open(archive)
if err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil {
log.Println("error closing file:", err)
}
}()
h := sha256.New()
_, err = io.Copy(h, f)
if err != nil {
return err
}
chksum := hex.EncodeToString(h.Sum(nil))
if chksum != exp {
return fmt.Errorf("archive checksum is different than expected: %q", archive)
}
log.Printf("Archive %q has expected checksum: %s", archive, exp)
return nil
}
// BuildImage builds a Docker image.
// The image tag is returned.
//
// It verifies the checksum of the pre-built grafana tarball, runs
// `docker build` against the repository Dockerfile, optionally saves the image
// to a tarball and uploads it to the grafana-prerelease GCS bucket
// (shouldSave), and for the OSS edition additionally tags the image into the
// grafana-oss repo. All returned tags were applied to the built image.
func BuildImage(version string, arch config.Architecture, grafanaDir string, useUbuntu, shouldSave bool, edition config.Edition, mode config.VersionMode) ([]string, error) {
	// Map the architecture onto the registry prefix of the base image;
	// amd64 images use the unprefixed official image.
	var baseArch string

	switch arch {
	case "amd64":
	case "armv7":
		baseArch = "arm32v7/"
	case "arm64":
		baseArch = "arm64v8/"
	default:
		return []string{}, fmt.Errorf("unrecognized architecture %q", arch)
	}

	// Alpine images consume the musl-linked tarball; Ubuntu images the
	// default (glibc) one, and their tags carry a "-ubuntu" suffix.
	libc := "-musl"
	baseImage := fmt.Sprintf("%salpine:3.18.5", baseArch)
	tagSuffix := ""
	if useUbuntu {
		libc = ""
		baseImage = fmt.Sprintf("%subuntu:22.04", baseArch)
		tagSuffix = "-ubuntu"
	}

	var editionStr string
	var dockerRepo string
	var additionalDockerRepo string
	var tags []string
	var imageFileBase string

	// The enterprise2 target repo is injected via the environment.
	var dockerEnterprise2Repo string
	if repo, ok := os.LookupEnv("DOCKER_ENTERPRISE2_REPO"); ok {
		dockerEnterprise2Repo = repo
	}

	switch edition {
	case config.EditionOSS:
		dockerRepo = "grafana/grafana-image-tags"
		additionalDockerRepo = "grafana/grafana-oss-image-tags"
		imageFileBase = "grafana-oss"
	case config.EditionEnterprise:
		dockerRepo = "grafana/grafana-enterprise-image-tags"
		imageFileBase = "grafana-enterprise"
		editionStr = "-enterprise"
	case config.EditionEnterprise2:
		dockerRepo = dockerEnterprise2Repo
		imageFileBase = "grafana-enterprise2"
		editionStr = "-enterprise2"
	default:
		return []string{}, fmt.Errorf("unrecognized edition %s", edition)
	}

	buildDir := filepath.Join(grafanaDir, "packaging/docker")
	// For example: grafana-8.5.0-52819pre.linux-amd64-musl.tar.gz
	archive := fmt.Sprintf("grafana%s-%s.linux-%s%s.tar.gz", editionStr, version, arch, libc)
	// Refuse to build from a tarball whose checksum does not match its
	// recorded .sha256 file.
	if err := verifyArchive(filepath.Join(buildDir, archive)); err != nil {
		return []string{}, err
	}

	tag := fmt.Sprintf("%s:%s%s-%s", dockerRepo, version, tagSuffix, arch)
	tags = append(tags, tag)
	args := []string{
		"build",
		"-q",
		"--build-arg", fmt.Sprintf("BASE_IMAGE=%s", baseImage),
		"--build-arg", fmt.Sprintf("GRAFANA_TGZ=%s", archive),
		"--build-arg", "GO_SRC=tgz-builder",
		"--build-arg", "JS_SRC=tgz-builder",
		"--build-arg", "RUN_SH=./run.sh",
		"--tag", tag,
		"--no-cache",
		"--file", "../../Dockerfile",
		".",
		"--label", fmt.Sprintf("mode=%s", string(mode)),
	}

	//nolint:gosec
	cmd := exec.Command("docker", args...)
	cmd.Dir = buildDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Env = append(os.Environ(), "DOCKER_CLI_EXPERIMENTAL=enabled", "DOCKER_BUILDKIT=1")
	log.Printf("Running Docker: DOCKER_CLI_EXPERIMENTAL=enabled DOCKER_BUILDKIT=1 %s", cmd)
	if err := cmd.Run(); err != nil {
		return []string{}, fmt.Errorf("building Docker image failed: %w", err)
	}

	// Optionally persist the image as a tarball and upload it to the
	// grafana-prerelease GCS bucket for later publishing.
	if shouldSave {
		imageFile := fmt.Sprintf("%s-%s%s-%s.img", imageFileBase, version, tagSuffix, arch)
		//nolint:gosec
		cmd = exec.Command("docker", "save", tag, "-o", imageFile)
		cmd.Dir = buildDir
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		log.Printf("Running Docker: %s", cmd)
		if err := cmd.Run(); err != nil {
			return []string{}, fmt.Errorf("saving Docker image failed: %w", err)
		}

		gcsURL := fmt.Sprintf("gs://grafana-prerelease/artifacts/docker/%s/%s", version, imageFile)
		//nolint:gosec
		cmd = exec.Command("gsutil", "-q", "cp", imageFile, gcsURL)
		cmd.Dir = buildDir
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		log.Printf("Running gsutil: %s", cmd)
		if err := cmd.Run(); err != nil {
			return []string{}, fmt.Errorf("storing Docker image failed: %w", err)
		}
		log.Printf("Docker image %s stored to grafana-prerelease GCS bucket", imageFile)
	}

	// OSS images are additionally tagged into the grafana-oss repo.
	if additionalDockerRepo != "" {
		additionalTag := fmt.Sprintf("%s:%s%s-%s", additionalDockerRepo, version, tagSuffix, arch)

		//nolint:gosec
		cmd = exec.Command("docker", "tag", tag, additionalTag)
		cmd.Dir = buildDir
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		log.Printf("Running Docker: %s", cmd)
		if err := cmd.Run(); err != nil {
			return []string{}, fmt.Errorf("tagging Docker image failed: %w", err)
		}
		tags = append(tags, additionalTag)
	}

	return tags, nil
}

View File

@@ -1,34 +0,0 @@
package docker
import (
"fmt"
"log"
"os"
"os/exec"
)
// AllArchs is a list of all supported Docker image architectures.
var AllArchs = []string{"amd64", "arm64"}
// emulatorImage is the docker image used as the cross-platform emulator
var emulatorImage = "tonistiigi/binfmt:qemu-v7.0.0"
// Init initializes the OS for Docker image building: it turns on BuildKit and
// registers QEMU binfmt handlers (via the emulator image) so that images for
// non-native architectures can be built and executed.
func Init() error {
	// Necessary for cross-platform builds
	if err := os.Setenv("DOCKER_BUILDKIT", "1"); err != nil {
		log.Println("error setting DOCKER_BUILDKIT environment variable:", err)
	}

	// Enable execution of Docker images for other architectures
	//nolint:gosec
	installCmd := exec.Command("docker", "run", "--privileged", "--rm",
		emulatorImage, "--install", "all")
	out, err := installCmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to enable execution of cross-platform Docker images: %w\n%s", err, out)
	}

	log.Println("emulators have been installed successfully!")
	return nil
}

View File

@@ -1,62 +0,0 @@
package docker
import (
"fmt"
"log"
"os"
"os/exec"
"time"
)
// Retry policy shared by PushImage and PushManifest.
const (
	tries     = 3  // maximum number of push attempts
	sleepTime = 30 // seconds to wait between failed attempts
)
// PushImage pushes the given Docker image to its registry, retrying up to
// `tries` times and sleeping `sleepTime` seconds between failed attempts.
// The last push error is returned, wrapped, if every attempt fails.
func PushImage(newImage string) error {
	var err error
	for i := 0; i < tries; i++ {
		log.Printf("push attempt #%d...", i+1)
		var out []byte
		cmd := exec.Command("docker", "push", newImage)
		cmd.Dir = "."
		out, err = cmd.CombinedOutput()
		if err != nil {
			log.Printf("output: %s", out)
			log.Printf("sleep for %d, before retrying...", sleepTime)
			time.Sleep(sleepTime * time.Second)
		} else {
			log.Printf("Successfully pushed %s!", newImage)
			break
		}
	}
	if err != nil {
		// Wrap with %w (not %q) so callers can inspect the underlying
		// exec error with errors.Is/As, matching PushManifest below.
		return fmt.Errorf("error pushing images to DockerHub: %w", err)
	}
	return nil
}
// PushManifest pushes a Docker manifest list to the registry, retrying up to
// `tries` times with a `sleepTime`-second pause after each failed attempt.
func PushManifest(manifest string) error {
	log.Printf("Pushing Docker manifest %s...", manifest)

	var lastErr error
	for attempt := 1; attempt <= tries; attempt++ {
		log.Printf("push attempt #%d...", attempt)
		cmd := exec.Command("docker", "manifest", "push", manifest)
		// Manifest commands require the experimental CLI features.
		cmd.Env = append(os.Environ(), "DOCKER_CLI_EXPERIMENTAL=enabled")
		out, runErr := cmd.CombinedOutput()
		lastErr = runErr
		if runErr == nil {
			log.Printf("Successful manifest push! %s", string(out))
			break
		}
		log.Printf("output: %s", out)
		log.Printf("sleep for %d, before retrying...", sleepTime)
		time.Sleep(sleepTime * time.Second)
	}

	if lastErr != nil {
		return fmt.Errorf("failed to push manifest, err: %w", lastErr)
	}
	return nil
}

View File

@@ -1,18 +0,0 @@
package env
import (
"strings"
)
// Lookup is the equivalent of os.LookupEnv, only you are able to provide the list of environment variables.
// To use this as os.LookupEnv would be used, simply call
// `env.Lookup("ENVIRONMENT_VARIABLE", os.Environ())`
func Lookup(name string, vars []string) (string, bool) {
	// Match on "NAME=" rather than "NAME" alone: matching the bare name
	// would incorrectly hit any variable whose name merely starts with the
	// requested one (e.g. ENV_10 when asked for ENV_1) and then fail to
	// trim the prefix, returning a garbage value.
	prefix := name + "="
	for _, v := range vars {
		if strings.HasPrefix(v, prefix) {
			return strings.TrimPrefix(v, prefix), true
		}
	}
	return "", false
}

View File

@@ -1,43 +0,0 @@
package env_test
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/build/env"
)
// TestLookup exercises env.Lookup against a fixed variable list, covering
// set values, a present-but-empty value, and a missing name.
func TestLookup(t *testing.T) {
	values := []string{"ENV_1=a", "ENV_2=b", "ENV_3=c", "ENV_4_TEST="}
	{
		v, ok := env.Lookup("ENV_1", values)
		require.Equal(t, v, "a")
		require.True(t, ok)
	}
	{
		v, ok := env.Lookup("ENV_2", values)
		require.Equal(t, v, "b")
		require.True(t, ok)
	}
	{
		v, ok := env.Lookup("ENV_3", values)
		require.Equal(t, v, "c")
		require.True(t, ok)
	}
	{
		// Present but empty: found, value is "".
		v, ok := env.Lookup("ENV_4_TEST", values)
		require.Equal(t, v, "")
		require.True(t, ok)
	}
	{
		// Absent name: not found.
		v, ok := env.Lookup("NOT_THERE", values)
		require.Equal(t, v, "")
		require.False(t, ok)
	}
}

View File

@@ -1,61 +0,0 @@
package errutil
import (
"context"
"log"
"sync"
)
// Group is a collection of goroutines working on subtasks that share a
// cancellable context, similar in spirit to golang.org/x/sync/errgroup.
// Only the first error reported by a subtask is retained.
type Group struct {
	cancel  func()    // cancels the shared context; may be nil if not built via GroupWithContext
	wg      sync.WaitGroup
	errOnce sync.Once // guards the one-time write of err and the cancel call
	err     error     // first error returned by a wrapped function
}

// GroupWithContext returns a new Group and a context derived from ctx that
// is cancelled when a subtask fails or when Wait/Cancel is called.
func GroupWithContext(ctx context.Context) (*Group, context.Context) {
	ctx, cancel := context.WithCancel(ctx)
	return &Group{cancel: cancel}, ctx
}

// Wait waits for any wrapped goroutines to finish and returns any error having occurred in one of them.
func (g *Group) Wait() error {
	log.Println("Waiting on Group")
	g.wg.Wait()
	if g.cancel != nil {
		log.Println("Group canceling its context after waiting")
		g.cancel()
	}
	return g.err
}

// Cancel cancels the associated context.
// NOTE(review): unlike Wait, this does not nil-check cancel — a Group not
// created via GroupWithContext would panic here; confirm all callers use
// the constructor.
func (g *Group) Cancel() {
	log.Println("Group's Cancel method being called")
	g.cancel()
}

// Wrap wraps a function to be executed in a goroutine.
// The returned closure records the first error (exactly once, via errOnce)
// and cancels the shared context so sibling goroutines can stop early.
func (g *Group) Wrap(f func() error) func() {
	g.wg.Add(1)
	return func() {
		defer g.wg.Done()
		if err := f(); err != nil {
			g.errOnce.Do(func() {
				log.Printf("An error occurred in Group: %s", err)
				g.err = err
				if g.cancel != nil {
					log.Println("Group canceling its context due to error")
					g.cancel()
				}
			})
		}
	}
}

// Go wraps the provided function and executes it in a goroutine.
func (g *Group) Go(f func() error) {
	wrapped := g.Wrap(f)
	go wrapped()
}

View File

@@ -1,46 +0,0 @@
package executil
import (
"context"
"fmt"
"os/exec"
"strings"
)
// RunAt runs cmd with args in directory dir. On failure, the returned error
// wraps the exec error and includes the command's combined output.
func RunAt(ctx context.Context, dir, cmd string, args ...string) error {
	// Ignore gosec G204 as this function is only used in the build process.
	//nolint:gosec
	c := exec.CommandContext(ctx, cmd, args...)
	c.Dir = dir
	b, err := c.CombinedOutput()
	if err != nil {
		return fmt.Errorf("%w. '%s %v': %s", err, cmd, args, string(b))
	}
	return nil
}

// Run runs cmd with args in the current directory.
func Run(ctx context.Context, cmd string, args ...string) error {
	return RunAt(ctx, ".", cmd, args...)
}

// OutputAt runs cmd with args in dir and returns its combined output with
// surrounding whitespace trimmed.
// NOTE(review): on failure the captured output is discarded, unlike RunAt —
// confirm whether callers would benefit from it being included in the error.
func OutputAt(ctx context.Context, dir, cmd string, args ...string) (string, error) {
	// Ignore gosec G204 as this function is only used in the build process.
	//nolint:gosec
	c := exec.CommandContext(ctx, cmd, args...)
	c.Dir = dir
	b, err := c.CombinedOutput()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(b)), nil
}

// Output runs cmd with args in the current directory and returns its trimmed output.
func Output(ctx context.Context, cmd string, args ...string) (string, error) {
	return OutputAt(ctx, ".", cmd, args...)
}

View File

@@ -1,56 +0,0 @@
package frontend
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/errutil"
"github.com/grafana/grafana/pkg/build/lerna"
"github.com/grafana/grafana/pkg/build/syncutil"
)
// BuildFrontendPackages schedules a worker that builds the frontend npm
// packages via lerna for the given edition. The work runs asynchronously on
// the pool; failures are reported through the errutil.Group.
func BuildFrontendPackages(version string, edition config.Edition, grafanaDir string, p syncutil.WorkerPool, g *errutil.Group) error {
	p.Schedule(g.Wrap(func() error {
		if err := lerna.BuildFrontendPackages(version, edition, grafanaDir); err != nil {
			// %w (was %v) so the lerna error remains unwrappable by callers.
			return fmt.Errorf("failed to build %s frontend packages: %w", edition, err)
		}
		log.Printf("Finished building %s frontend packages", string(edition))
		return nil
	}))
	return nil
}
// Build builds the Grafana front-end: it removes stale build output
// directories, then schedules a webpack build (`yarn run build`) on the
// worker pool. Failures are reported through the errutil.Group.
func Build(edition config.Edition, grafanaDir string, p syncutil.WorkerPool, g *errutil.Group) error {
	log.Printf("Building %s frontend in %q", edition, grafanaDir)
	grafanaDir, err := filepath.Abs(grafanaDir)
	if err != nil {
		return err
	}

	// Clean previous build artifacts so the webpack output is reproducible.
	for _, dpath := range []string{"tmp", "public_gen", "public/build"} {
		dpath = filepath.Join(grafanaDir, dpath)
		if err := os.RemoveAll(dpath); err != nil {
			return fmt.Errorf("failed to remove %q: %w", dpath, err)
		}
	}

	p.Schedule(g.Wrap(func() error {
		cmd := exec.Command("yarn", "run", "build")
		cmd.Dir = grafanaDir
		if output, err := cmd.CombinedOutput(); err != nil {
			// Wrap err (previously dropped) and keep the build output
			// for diagnosis, matching the style used in docker.Init.
			return fmt.Errorf("failed to build %s frontend with webpack: %w\n%s", edition, err, output)
		}
		log.Printf("Finished building %s frontend", edition)
		return nil
	}))
	return nil
}

View File

@@ -1,42 +0,0 @@
package frontend
import (
"fmt"
"github.com/blang/semver/v4"
"github.com/grafana/grafana/pkg/build/config"
"github.com/urfave/cli/v2"
)
const GrafanaDir = "."
// GetConfig assembles the frontend build Config from CLI flags and release
// metadata. In tag mode (and not a test tag) it additionally verifies that
// the tag's semver digits match the version in package.json, failing the
// CLI run with exit code 1 on a mismatch.
func GetConfig(c *cli.Context, metadata config.Metadata) (config.Config, config.Edition, error) {
	cfg := config.Config{
		NumWorkers:  c.Int("jobs"),
		GitHubToken: c.String("github-token"),
	}

	mode := config.Edition(c.String("edition"))

	if metadata.ReleaseMode.Mode == config.TagMode && !metadata.ReleaseMode.IsTest {
		packageJSONVersion, err := config.GetPackageJSONVersion(GrafanaDir)
		if err != nil {
			return config.Config{}, "", err
		}
		semverGrafanaVersion, err := semver.Parse(metadata.GrafanaVersion)
		if err != nil {
			return config.Config{}, "", err
		}
		semverPackageJSONVersion, err := semver.Parse(packageJSONVersion)
		if err != nil {
			return config.Config{}, "", err
		}
		// Check if the semver digits of the tag are not equal
		// (FinalizeVersion strips prerelease/build metadata, so
		// 10.0.0-abcd123pre still matches a package.json of 10.0.0).
		if semverGrafanaVersion.FinalizeVersion() != semverPackageJSONVersion.FinalizeVersion() {
			return config.Config{}, "", cli.Exit(fmt.Errorf("package.json version and input tag version differ %s != %s.\nPlease update package.json", packageJSONVersion, metadata.GrafanaVersion), 1)
		}
	}
	cfg.PackageVersion = metadata.GrafanaVersion
	return cfg, mode, nil
}

View File

@@ -1,118 +0,0 @@
package frontend
import (
"encoding/json"
"flag"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/build/config"
)
const (
jobs = "jobs"
githubToken = "github-token"
buildID = "build-id"
)
type packageJson struct {
Version string `json:"version"`
}
type flagObj struct {
name string
value string
}
var app = cli.NewApp()
func TestGetConfig(t *testing.T) {
tests := []struct {
ctx *cli.Context
name string
packageJsonVersion string
metadata config.Metadata
wantErr bool
}{
{
ctx: cli.NewContext(app, setFlags(t, flag.NewFlagSet("flagSet", flag.ContinueOnError), flagObj{name: jobs, value: "2"}, flagObj{name: githubToken, value: "token"}), nil),
name: "package.json matches tag",
packageJsonVersion: "10.0.0",
metadata: config.Metadata{GrafanaVersion: "10.0.0", ReleaseMode: config.ReleaseMode{Mode: config.TagMode}},
wantErr: false,
},
{
ctx: cli.NewContext(app, setFlags(t, flag.NewFlagSet("flagSet", flag.ContinueOnError), flagObj{name: jobs, value: "2"}, flagObj{name: githubToken, value: "token"}), nil),
name: "custom tag, package.json doesn't match",
packageJsonVersion: "10.0.0",
metadata: config.Metadata{GrafanaVersion: "10.0.0-abcd123pre", ReleaseMode: config.ReleaseMode{Mode: config.TagMode}},
wantErr: false,
},
{
ctx: cli.NewContext(app, setFlags(t, flag.NewFlagSet("flagSet", flag.ContinueOnError), flagObj{name: jobs, value: "2"}, flagObj{name: githubToken, value: "token"}), nil),
name: "package.json doesn't match tag",
packageJsonVersion: "10.1.0",
metadata: config.Metadata{GrafanaVersion: "10.0.0", ReleaseMode: config.ReleaseMode{Mode: config.TagMode}},
wantErr: true,
},
{
ctx: cli.NewContext(app, setFlags(t, flag.NewFlagSet("flagSet", flag.ContinueOnError), flagObj{name: jobs, value: "2"}, flagObj{name: githubToken, value: "token"}), nil),
name: "test tag event, check should be skipped",
packageJsonVersion: "10.1.0",
metadata: config.Metadata{GrafanaVersion: "10.1.0-test", ReleaseMode: config.ReleaseMode{Mode: config.TagMode, IsTest: true}},
wantErr: false,
},
{
ctx: cli.NewContext(app, setFlags(t, flag.NewFlagSet("flagSet", flag.ContinueOnError), flagObj{name: jobs, value: "2"}, flagObj{name: githubToken, value: "token"}, flagObj{name: buildID, value: "12345"}), nil),
name: "non-tag event",
packageJsonVersion: "10.1.0-pre",
metadata: config.Metadata{GrafanaVersion: "10.1.0-12345pre", ReleaseMode: config.ReleaseMode{Mode: config.PullRequestMode}},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := createTempPackageJson(t, tt.packageJsonVersion)
require.NoError(t, err)
got, _, err := GetConfig(tt.ctx, tt.metadata)
if !tt.wantErr {
require.Equal(t, got.PackageVersion, tt.metadata.GrafanaVersion)
}
if tt.wantErr {
require.Equal(t, got.PackageVersion, "")
require.Error(t, err)
}
})
}
}
// setFlags registers each named flag on flagSet with its given default value,
// giving every flag its own backing string variable.
// The previous implementation passed &f.name — the address of a field of the
// reused loop variable — so every registered flag shared a single string slot
// and each iteration's default clobbered the earlier flags' values.
func setFlags(t *testing.T, flagSet *flag.FlagSet, flags ...flagObj) *flag.FlagSet {
	t.Helper()
	for _, f := range flags {
		if f.name != "" {
			dest := new(string)
			flagSet.StringVar(dest, f.name, f.value, "")
		}
	}
	return flagSet
}
// createTempPackageJson writes a package.json with the given version into the
// current working directory and registers a cleanup that removes it after the
// test. The error return is kept for call-site compatibility; failures are
// reported through t instead.
func createTempPackageJson(t *testing.T, version string) error {
	t.Helper()

	data := packageJson{Version: version}
	// Previously the marshal error was silently discarded with `_`.
	file, err := json.MarshalIndent(data, "", " ")
	require.NoError(t, err)

	err = os.WriteFile("package.json", file, 0644)
	require.NoError(t, err)

	t.Cleanup(func() {
		err := os.RemoveAll("package.json")
		require.NoError(t, err)
	})
	return nil
}

View File

@@ -1,65 +0,0 @@
package fsutil
import (
"fmt"
"os"
"path/filepath"
)
// CopyRecursive copies files and directories recursively.
// Directories are recreated with the source's mode, symlinks are recreated
// as symlinks (not followed), and everything else is copied via CopyFile.
func CopyRecursive(src, dst string) error {
	sfi, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !sfi.IsDir() {
		return CopyFile(src, dst)
	}

	if _, err := os.Stat(dst); os.IsNotExist(err) {
		if err := os.MkdirAll(dst, sfi.Mode()); err != nil {
			return fmt.Errorf("failed to create directory %q: %s", dst, err)
		}
	}

	entries, err := os.ReadDir(src)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		srcPath := filepath.Join(src, entry.Name())
		dstPath := filepath.Join(dst, entry.Name())

		// Lstat (not Stat) so symlinks are detected instead of followed;
		// with Stat the ModeSymlink case below could never trigger and
		// symlinks were silently copied as regular files.
		srcFi, err := os.Lstat(srcPath)
		if err != nil {
			return err
		}

		switch srcFi.Mode() & os.ModeType {
		case os.ModeDir:
			if err := CopyRecursive(srcPath, dstPath); err != nil {
				return err
			}
		case os.ModeSymlink:
			link, err := os.Readlink(srcPath)
			if err != nil {
				return err
			}
			if err := os.Symlink(link, dstPath); err != nil {
				return err
			}
		default:
			if err := CopyFile(srcPath, dstPath); err != nil {
				return err
			}
		}
		// The old trailing "chmod if symlink" block was unreachable with
		// Stat, and with Lstat it would chmod the link *target* — removed.
	}
	return nil
}

View File

@@ -1,43 +0,0 @@
package fsutil
import (
"fmt"
"os"
)
// CreateTempFile generates a temp filepath, based on the provided suffix.
// A typical generated path looks like /var/folders/abcd/abcdefg/A/1137975807.
func CreateTempFile(sfx string) (string, error) {
var suffix string
if sfx != "" {
suffix = fmt.Sprintf("*-%s", sfx)
} else {
suffix = sfx
}
f, err := os.CreateTemp("", suffix)
if err != nil {
return "", err
}
if err := f.Close(); err != nil {
return "", err
}
return f.Name(), nil
}
// CreateTempDir generates a temp directory, based on the provided suffix.
// A typical generated path looks like /var/folders/abcd/abcdefg/A/1137975807/.
func CreateTempDir(sfx string) (string, error) {
var suffix string
if sfx != "" {
suffix = fmt.Sprintf("*-%s", sfx)
} else {
suffix = sfx
}
dir, err := os.MkdirTemp("", suffix)
if err != nil {
return "", err
}
return dir, nil
}

View File

@@ -1,48 +0,0 @@
package fsutil
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
)
func TestCreateTempFile(t *testing.T) {
t.Run("empty suffix, expects pattern like: /var/folders/abcd/abcdefg/A/1137975807", func(t *testing.T) {
filePath, err := CreateTempFile("")
require.NoError(t, err)
pathParts := strings.Split(filePath, "/")
require.Greater(t, len(pathParts), 1)
require.Len(t, strings.Split(pathParts[len(pathParts)-1], "-"), 1)
})
t.Run("non-empty suffix, expects /var/folders/abcd/abcdefg/A/1137975807-foobar", func(t *testing.T) {
filePath, err := CreateTempFile("foobar")
require.NoError(t, err)
pathParts := strings.Split(filePath, "/")
require.Greater(t, len(pathParts), 1)
require.Len(t, strings.Split(pathParts[len(pathParts)-1], "-"), 2)
})
}
func TestCreateTempDir(t *testing.T) {
t.Run("empty suffix, expects pattern like: /var/folders/abcd/abcdefg/A/1137975807/", func(t *testing.T) {
filePath, err := CreateTempFile("")
require.NoError(t, err)
pathParts := strings.Split(filePath, "/")
require.Greater(t, len(pathParts), 1)
require.Len(t, strings.Split(pathParts[len(pathParts)-1], "-"), 1)
})
t.Run("non-empty suffix, expects /var/folders/abcd/abcdefg/A/1137975807-foobar/", func(t *testing.T) {
filePath, err := CreateTempFile("foobar")
require.NoError(t, err)
pathParts := strings.Split(filePath, "/")
require.Greater(t, len(pathParts), 1)
require.Len(t, strings.Split(pathParts[len(pathParts)-1], "-"), 2)
})
}

72
pkg/build/gcom/url.go Normal file
View File

@@ -0,0 +1,72 @@
package gcom
import (
"fmt"
"net/url"
"path"
"strings"
"github.com/grafana/grafana/pkg/build/versions"
)
// PackageName builds the published artifact file name for a given
// distro/arch/version combination, e.g. "grafana_1.2.3_arm64.deb",
// "grafana-1.2.3-1.x86_64.rpm", or "grafana-1.2.3.linux-amd64.tar.gz".
// musl appends a "-musl" arch suffix (tarballs only); raspberryPi selects
// the "-rpi" package name.
func PackageName(grafana, distro, arch, version, ext string, musl bool, raspberryPi bool) string {
	v := versions.ParseSemver(version)
	if raspberryPi {
		grafana += "-rpi"
	}

	versionString := strings.Join([]string{v.Major, v.Minor, v.Patch}, ".")
	// (Removed a stray debug fmt.Println of the version string here: it
	// wrote to stdout on every call, while the rest of the package logs
	// via the log package.)

	if distro == "deb" {
		// dpkg versioning: '+' introduces build metadata and '~' sorts
		// before everything, so prerelease goes after '~'.
		if v.BuildMetadata != "" {
			versionString += "+" + strings.ReplaceAll(v.BuildMetadata, "-", "~")
		}
		if v.Prerelease != "" {
			versionString += "~" + v.Prerelease
		}
		return strings.Join([]string{grafana, versionString, arch}, "_") + "." + ext
	}
	if distro == "rhel" {
		if v.BuildMetadata != "" {
			versionString += "+" + strings.ReplaceAll(v.BuildMetadata, "-", "~")
		}
		if v.Prerelease != "" {
			versionString += "~" + v.Prerelease
		}
		versionString += "-1"
		// Notable difference between our deb naming and our RPM naming: the file ends with `.arch.ext`, not
		// `_arch.ext`.
		return strings.Join([]string{grafana, versionString}, "-") + "." + arch + "." + ext
	}

	if v.Prerelease != "" {
		versionString += "-" + v.Prerelease
	}
	if v.BuildMetadata != "" {
		versionString += "+" + v.BuildMetadata
	}
	if musl {
		arch += "-musl"
	}

	// grafana-enterprise-1.2.3+example-01.linux-amd64.tar.gz
	return fmt.Sprintf("%s-%s.%s-%s.%s", grafana, versionString, distro, arch, ext)
}
// GetURL returns the full download URL for the package described by the
// arguments, hosted under baseURL (scheme and host are taken from baseURL,
// and the package name is joined onto its path).
func GetURL(baseURL *url.URL, version, grafana, distro, arch, ext string, musl, raspberryPi bool) *url.URL {
	name := PackageName(grafana, distro, arch, version, ext, musl, raspberryPi)
	u := url.URL{
		Scheme: baseURL.Scheme,
		Host:   baseURL.Host,
		Path:   path.Join(baseURL.Path, name),
	}
	return &u
}

367
pkg/build/gcom/url_test.go Normal file
View File

@@ -0,0 +1,367 @@
package gcom_test
import (
"fmt"
"testing"
"github.com/grafana/grafana/pkg/build/gcom"
"github.com/stretchr/testify/require"
)
func TestPackageName(t *testing.T) {
type args struct {
Distro string
Arch string
Version string
Ext string
Musl bool
RaspberryPi bool
Expect string
}
cases := []args{
{
RaspberryPi: true,
Distro: "deb",
Arch: "armhf",
Version: "1.2.3",
Ext: "deb",
Expect: "grafana-rpi_1.2.3_armhf.deb",
},
{
Distro: "deb",
Arch: "arm64",
Version: "1.2.3",
Ext: "deb",
Expect: "grafana_1.2.3_arm64.deb",
},
{
Distro: "rhel",
Arch: "aarch64",
Version: "1.2.3",
Ext: "rpm",
Expect: "grafana-1.2.3-1.aarch64.rpm",
},
{
Distro: "rhel",
Arch: "aarch64",
Ext: "rpm.sha256",
Version: "1.2.3",
Expect: "grafana-1.2.3-1.aarch64.rpm.sha256",
},
{
Distro: "rhel",
Ext: "rpm",
Version: "1.2.3",
Arch: "x86_64",
Expect: "grafana-1.2.3-1.x86_64.rpm",
},
{
Distro: "rhel",
Ext: "rpm.sha256",
Version: "1.2.3",
Arch: "x86_64",
Expect: "grafana-1.2.3-1.x86_64.rpm.sha256",
},
{
Distro: "darwin",
Ext: "tar.gz",
Version: "1.2.3",
Arch: "amd64",
Expect: "grafana-1.2.3.darwin-amd64.tar.gz",
},
{
Distro: "darwin",
Ext: "tar.gz.sha256",
Version: "1.2.3",
Arch: "amd64",
Expect: "grafana-1.2.3.darwin-amd64.tar.gz.sha256",
},
{
Distro: "darwin",
Ext: "tar.gz",
Version: "1.2.3",
Arch: "arm64",
Expect: "grafana-1.2.3.darwin-arm64-musl.tar.gz",
Musl: true,
},
{
Distro: "darwin",
Ext: "tar.gz.sha256",
Version: "1.2.3",
Arch: "arm64",
Expect: "grafana-1.2.3.darwin-arm64-musl.tar.gz.sha256",
Musl: true,
},
{
Distro: "darwin",
Ext: "tar.gz",
Version: "1.2.3",
Arch: "arm64",
Expect: "grafana-1.2.3.darwin-arm64.tar.gz",
},
{
Distro: "darwin",
Ext: "tar.gz.sha256",
Version: "1.2.3",
Arch: "arm64",
Expect: "grafana-1.2.3.darwin-arm64.tar.gz.sha256",
},
{
Distro: "linux",
Ext: "tar.gz",
Version: "1.2.3",
Arch: "amd64",
Expect: "grafana-1.2.3.linux-amd64-musl.tar.gz",
Musl: true,
},
{
Distro: "linux",
Ext: "tar.gz.sha256",
Version: "1.2.3",
Arch: "amd64",
Expect: "grafana-1.2.3.linux-amd64-musl.tar.gz.sha256",
Musl: true,
},
{
Distro: "linux",
Ext: "tar.gz",
Version: "1.2.3",
Arch: "amd64",
Expect: "grafana-1.2.3.linux-amd64.tar.gz",
},
{
Distro: "linux",
Ext: "tar.gz.sha256",
Version: "1.2.3",
Arch: "amd64",
Expect: "grafana-1.2.3.linux-amd64.tar.gz.sha256",
},
{
Distro: "linux",
Ext: "tar.gz",
Version: "1.2.3",
Arch: "arm64",
Expect: "grafana-1.2.3.linux-arm64-musl.tar.gz",
Musl: true,
},
{
Distro: "linux",
Ext: "tar.gz.sha256",
Version: "1.2.3",
Arch: "arm64",
Expect: "grafana-1.2.3.linux-arm64-musl.tar.gz.sha256",
Musl: true,
},
{
Distro: "linux",
Ext: "tar.gz",
Version: "1.2.3",
Arch: "arm64",
Expect: "grafana-1.2.3.linux-arm64.tar.gz",
},
{
Ext: "tar.gz.sha256",
Version: "1.2.3",
Distro: "linux",
Arch: "arm64",
Expect: "grafana-1.2.3.linux-arm64.tar.gz.sha256",
},
{
Ext: "tar.gz",
Version: "1.2.3",
Distro: "linux",
Arch: "armv6",
Expect: "grafana-1.2.3.linux-armv6.tar.gz",
},
{
Ext: "tar.gz.sha256",
Version: "1.2.3",
Distro: "linux",
Arch: "armv6",
Expect: "grafana-1.2.3.linux-armv6.tar.gz.sha256",
},
{
Ext: "tar.gz",
Version: "1.2.3",
Distro: "linux",
Arch: "armv7",
Expect: "grafana-1.2.3.linux-armv7-musl.tar.gz",
Musl: true,
},
{
Ext: "tar.gz.sha256",
Version: "1.2.3",
Distro: "linux",
Arch: "armv7",
Expect: "grafana-1.2.3.linux-armv7-musl.tar.gz.sha256",
Musl: true,
},
{
Ext: "tar.gz",
Version: "1.2.3",
Distro: "linux",
Arch: "armv7",
Expect: "grafana-1.2.3.linux-armv7.tar.gz",
},
{
Ext: "tar.gz.sha256",
Version: "1.2.3",
Distro: "linux",
Arch: "armv7",
Expect: "grafana-1.2.3.linux-armv7.tar.gz.sha256",
},
{
Version: "1.2.3",
Arch: "amd64",
Ext: "exe",
Distro: "windows",
Expect: "grafana-1.2.3.windows-amd64.exe",
},
{
Version: "1.2.3",
Arch: "amd64",
Distro: "windows",
Ext: "exe.sha256",
Expect: "grafana-1.2.3.windows-amd64.exe.sha256",
},
{
Version: "1.2.3",
Arch: "amd64",
Distro: "windows",
Ext: "msi",
Expect: "grafana-1.2.3.windows-amd64.msi",
},
{
Version: "1.2.3",
Arch: "amd64",
Distro: "windows",
Ext: "msi.sha256",
Expect: "grafana-1.2.3.windows-amd64.msi.sha256",
},
{
Ext: "tar.gz",
Version: "1.2.3",
Distro: "windows",
Expect: "grafana-1.2.3.windows-amd64.tar.gz",
Arch: "amd64",
},
{
Version: "1.2.3",
Distro: "windows",
Arch: "amd64",
Ext: "tar.gz.sha256",
Expect: "grafana-1.2.3.windows-amd64.tar.gz.sha256",
},
{
Version: "1.2.3",
Distro: "windows",
Expect: "grafana-1.2.3.windows-amd64.zip",
Ext: "zip",
Arch: "amd64",
},
{
Version: "1.2.3",
Distro: "windows",
Expect: "grafana-1.2.3.windows-amd64.zip.sha256",
Ext: "zip.sha256",
Arch: "amd64",
},
{
Ext: "tar.gz",
Version: "1.2.3",
Distro: "windows",
Arch: "arm64",
Expect: "grafana-1.2.3.windows-arm64-musl.tar.gz",
Musl: true,
},
{
Version: "1.2.3",
Ext: "tar.gz.sha256",
Distro: "windows",
Arch: "arm64",
Expect: "grafana-1.2.3.windows-arm64-musl.tar.gz.sha256",
Musl: true,
},
{
Ext: "tar.gz",
Version: "1.2.3",
Distro: "windows",
Arch: "arm64",
Expect: "grafana-1.2.3.windows-arm64.tar.gz",
},
{
Version: "1.2.3",
Ext: "tar.gz.sha256",
Distro: "windows",
Arch: "arm64",
Expect: "grafana-1.2.3.windows-arm64.tar.gz.sha256",
},
{
RaspberryPi: true,
Version: "1.2.3",
Ext: "deb",
Arch: "armhf",
Distro: "deb",
Expect: "grafana-rpi_1.2.3_armhf.deb",
},
{
RaspberryPi: true,
Version: "1.2.3",
Ext: "deb.sha256",
Distro: "deb",
Arch: "armhf",
Expect: "grafana-rpi_1.2.3_armhf.deb.sha256",
},
{
Version: "1.2.3",
Ext: "deb",
Distro: "deb",
Expect: "grafana_1.2.3_amd64.deb",
Arch: "amd64",
},
{
Version: "1.2.3",
Ext: "deb.sha256",
Distro: "deb",
Expect: "grafana_1.2.3_amd64.deb.sha256",
Arch: "amd64",
},
{
Version: "1.2.3",
Ext: "deb",
Arch: "arm64",
Distro: "deb",
Expect: "grafana_1.2.3_arm64.deb",
},
{
Version: "1.2.3",
Ext: "deb.sha256",
Arch: "arm64",
Distro: "deb",
Expect: "grafana_1.2.3_arm64.deb.sha256",
},
{
Version: "1.2.3",
Ext: "deb",
Distro: "deb",
Arch: "armhf",
Expect: "grafana_1.2.3_armhf.deb",
},
{
Version: "1.2.3",
Ext: "deb.sha256",
Arch: "armhf",
Distro: "deb",
Expect: "grafana_1.2.3_armhf.deb.sha256",
},
}
for i, v := range cases {
t.Run(fmt.Sprintf("[%d / %d] %s", i+1, len(cases), v.Expect), func(t *testing.T) {
n := gcom.PackageName("grafana", v.Distro, v.Arch, v.Version, v.Ext, v.Musl, v.RaspberryPi)
require.Equal(t, v.Expect, n)
})
}
}

View File

@@ -4,13 +4,9 @@ import (
"context"
"errors"
"fmt"
"net/http"
"regexp"
"github.com/google/go-github/v45/github"
"golang.org/x/oauth2"
"github.com/grafana/grafana/pkg/build/stringutil"
)
const (
@@ -47,19 +43,6 @@ type StatusesService interface {
CreateStatus(ctx context.Context, owner, repo, ref string, status *github.RepoStatus) (*github.RepoStatus, *github.Response, error)
}
// NewGitHubClient creates a new Client using the provided GitHub token if not empty.
func NewGitHubClient(ctx context.Context, token string) *github.Client {
var tc *http.Client
if token != "" {
ts := oauth2.StaticTokenSource(&oauth2.Token{
AccessToken: token,
})
tc = oauth2.NewClient(ctx, ts)
}
return github.NewClient(tc)
}
func PRCheckRegexp() *regexp.Regexp {
reBranch, err := regexp.Compile(`^prc-([0-9]+)-([A-Za-z0-9]+)\/(.+)$`)
if err != nil {
@@ -68,76 +51,3 @@ func PRCheckRegexp() *regexp.Regexp {
return reBranch
}
// AddLabelToPR adds newLabel to OSS pull request prID.
// While scanning the existing labels it also removes any stale
// "enterprise-xx" check labels. If newLabel is already present, nothing is
// added (but the cleanup still happens).
func AddLabelToPR(ctx context.Context, client LabelsService, prID int, newLabel string) error {
	// Check existing labels
	labels, _, err := client.ListLabelsByIssue(ctx, RepoOwner, OSSRepo, prID, nil)
	if err != nil {
		return err
	}

	duplicate := false
	for _, label := range labels {
		if *label.Name == newLabel {
			// Remember the duplicate but keep scanning so the
			// enterprise-check cleanup below still runs for all labels.
			duplicate = true
			continue
		}

		// Delete existing "enterprise-xx" labels
		if stringutil.Contains(EnterpriseCheckLabels, *label.Name) {
			_, err := client.RemoveLabelForIssue(ctx, RepoOwner, OSSRepo, prID, *label.Name)
			if err != nil {
				return err
			}
		}
	}

	if duplicate {
		return nil
	}

	_, _, err = client.AddLabelsToIssue(ctx, RepoOwner, OSSRepo, prID, []string{newLabel})
	if err != nil {
		return err
	}

	return nil
}
func DeleteEnterpriseBranch(ctx context.Context, client GitService, branchName string) error {
ref := "heads/" + branchName
if _, err := client.DeleteRef(ctx, RepoOwner, EnterpriseRepo, ref); err != nil {
return err
}
return nil
}
// CreateEnterpriseStatus sets the status on a commit for the enterprise build check.
// Parameter order is (sha, link, status): sha identifies the commit, link
// becomes the status target URL, and status is the state string (e.g.
// "success") — callers passing strings must take care not to swap sha/link.
func CreateEnterpriseStatus(ctx context.Context, client StatusesService, sha, link, status string) (*github.RepoStatus, error) {
	check, _, err := client.CreateStatus(ctx, RepoOwner, OSSRepo, sha, &github.RepoStatus{
		Context:     github.String(EnterpriseCheckName),
		Description: github.String(EnterpriseCheckDescription),
		TargetURL:   github.String(link),
		State:       github.String(status),
	})
	if err != nil {
		return nil, err
	}

	return check, nil
}
func CreateEnterpriseBuildFailedComment(ctx context.Context, client CommentService, link string, prID int) error {
body := fmt.Sprintf("Drone build failed: %s", link)
_, _, err := client.CreateComment(ctx, RepoOwner, OSSRepo, prID, &github.IssueComment{
Body: &body,
})
if err != nil {
return err
}
return nil
}

View File

@@ -1,56 +0,0 @@
package git_test
import (
"context"
"errors"
"testing"
"github.com/google/go-github/v45/github"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/build/git"
)
type TestChecksService struct {
CreateCheckRunError error
}
func (s *TestChecksService) CreateStatus(ctx context.Context, owner, repo, ref string, status *github.RepoStatus) (*github.RepoStatus, *github.Response, error) {
if s.CreateCheckRunError != nil {
return nil, nil, s.CreateCheckRunError
}
return &github.RepoStatus{
ID: github.Int64(1),
URL: status.URL,
}, nil, nil
}
// TestCreateEnterpriseRepoStatus covers the success and error paths of
// git.CreateEnterpriseStatus using a stub StatusesService.
// The previous version passed (link, sha) where the signature is
// (ctx, client, sha, link, status); it compiled because both are strings,
// but the status was being created against the URL instead of the commit.
func TestCreateEnterpriseRepoStatus(t *testing.T) {
	t.Run("It should create a repo status", func(t *testing.T) {
		var (
			ctx    = context.Background()
			client = &TestChecksService{}
			link   = "http://example.com"
			sha    = "1234"
		)

		_, err := git.CreateEnterpriseStatus(ctx, client, sha, link, "success")
		require.NoError(t, err)
	})
	t.Run("It should return an error if GitHub fails to create the status", func(t *testing.T) {
		var (
			ctx              = context.Background()
			createCheckError = errors.New("create check run error")
			client           = &TestChecksService{
				CreateCheckRunError: createCheckError,
			}
			link = "http://example.com"
			sha  = "1234"
		)

		_, err := git.CreateEnterpriseStatus(ctx, client, sha, link, "success")
		require.ErrorIs(t, err, createCheckError)
	})
}

View File

@@ -1,135 +0,0 @@
package git_test
import (
"context"
"errors"
"testing"
"github.com/google/go-github/v45/github"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/build/git"
)
type TestLabelsService struct {
Labels []*github.Label
ListLabelsError error
RemoveLabelError error
AddLabelsError error
}
func (s *TestLabelsService) ListLabelsByIssue(ctx context.Context, owner string, repo string, number int, opts *github.ListOptions) ([]*github.Label, *github.Response, error) {
if s.ListLabelsError != nil {
return nil, nil, s.ListLabelsError
}
labels := s.Labels
if labels == nil {
labels = []*github.Label{}
}
return labels, nil, nil
}
func (s *TestLabelsService) RemoveLabelForIssue(ctx context.Context, owner string, repo string, number int, label string) (*github.Response, error) {
if s.RemoveLabelError != nil {
return nil, s.RemoveLabelError
}
return &github.Response{}, nil
}
func (s *TestLabelsService) AddLabelsToIssue(ctx context.Context, owner string, repo string, number int, labels []string) ([]*github.Label, *github.Response, error) {
if s.AddLabelsError != nil {
return nil, nil, s.AddLabelsError
}
l := make([]*github.Label, len(labels))
for i, v := range labels {
l[i] = &github.Label{
Name: github.String(v),
}
}
return l, nil, nil
}
func TestAddLabelToPR(t *testing.T) {
t.Run("It should add a label to a pull request", func(t *testing.T) {
var (
ctx = context.Background()
client = &TestLabelsService{}
pr = 20
label = "test-label"
)
require.NoError(t, git.AddLabelToPR(ctx, client, pr, label))
})
t.Run("It should not return an error if the label already exists", func(t *testing.T) {
var (
ctx = context.Background()
client = &TestLabelsService{
Labels: []*github.Label{
{
Name: github.String("test-label"),
},
},
}
pr = 20
label = "test-label"
)
require.NoError(t, git.AddLabelToPR(ctx, client, pr, label))
})
t.Run("It should return an error if GitHub returns an error when listing labels", func(t *testing.T) {
var (
ctx = context.Background()
listLabelsError = errors.New("list labels error")
client = &TestLabelsService{
ListLabelsError: listLabelsError,
Labels: []*github.Label{},
}
pr = 20
label = "test-label"
)
require.ErrorIs(t, git.AddLabelToPR(ctx, client, pr, label), listLabelsError)
})
t.Run("It should not return an error if there are existing enterprise-check labels.", func(t *testing.T) {
var (
ctx = context.Background()
client = &TestLabelsService{
Labels: []*github.Label{
{
Name: github.String("enterprise-failed"),
},
},
}
pr = 20
label = "test-label"
)
require.NoError(t, git.AddLabelToPR(ctx, client, pr, label))
})
t.Run("It should return an error if GitHub returns an error when removing existing enterprise-check labels", func(t *testing.T) {
var (
ctx = context.Background()
removeLabelError = errors.New("remove label error")
client = &TestLabelsService{
RemoveLabelError: removeLabelError,
Labels: []*github.Label{
{
Name: github.String("enterprise-failed"),
},
},
}
pr = 20
label = "test-label"
)
require.ErrorIs(t, git.AddLabelToPR(ctx, client, pr, label), removeLabelError)
})
}

View File

@@ -1,57 +0,0 @@
package git_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/grafana/grafana/pkg/build/git"
)
func TestPRCheckRegexp(t *testing.T) {
type match struct {
String string
Commit string
Branch string
PR string
}
var (
shouldMatch = []match{
{
String: "prc-1-a1b2c3d4/branch-name",
Branch: "branch-name",
Commit: "a1b2c3d4",
PR: "1",
},
{
String: "prc-111-a1b2c3d4/branch/name",
Branch: "branch/name",
Commit: "a1b2c3d4",
PR: "111",
},
{
String: "prc-102930122-a1b2c3d4/branch-name",
Branch: "branch-name",
Commit: "a1b2c3d4",
PR: "102930122",
},
}
shouldNotMatch = []string{"prc-a/branch", "km/test", "test", "prc", "prc/test", "price"}
)
regex := git.PRCheckRegexp()
for _, v := range shouldMatch {
assert.Truef(t, regex.MatchString(v.String), "regex '%s' should match %s", regex.String(), v)
m := regex.FindStringSubmatch(v.String)
assert.Equal(t, m[1], v.PR)
assert.Equal(t, m[2], v.Commit)
assert.Equal(t, m[3], v.Branch)
}
for _, v := range shouldNotMatch {
assert.False(t, regex.MatchString(v), "regex '%s' should not match %s", regex.String(), v)
}
}

View File

@@ -1,124 +0,0 @@
package golangutils
import (
"context"
"fmt"
"io"
"os/exec"
"strings"
"github.com/grafana/grafana/pkg/build/config"
)
type BuildOpts struct {
// Package refers to the path to the `main` package containing `func main`
Package string
// Output is used as the -o argument in the go build command
Output string
// Workdir should define some place in the module where the package path resolves.
// Go commands need to be ran inside a the Go module directory.
Workdir string
GoOS config.OS
GoArch config.Architecture
GoArm string
Go386 string
CC string
LibC string
CGoEnabled bool
CGoCFlags string
// LdFlags are joined by a space character and provided to the -ldflags argument.
// A valid element here would be `-X 'main.version=1.0.0'`.
LdFlags []string
Stdout io.ReadWriter
Stderr io.ReadWriter
Stdin io.ReadWriter
// ExtraEnv allows consumers to provide extra env args that are not defined above.
// A single element should be formatted using like so: {NAME}={VALUE}. Example: GOOS=linux.
ExtraEnv []string
// ExtraArgs allows consumers to provide extra arguments that are not defined above.
// Flag names and values should be two separate elements.
// These flags will be appended to the command arguments before the package path in "go build".
ExtraArgs []string
}
// Env constructs a list of key/value pairs for setting a build command's environment.
// Only fields that are set produce an entry. ExtraEnv entries are placed
// first so that the typed fields above win when the command resolves
// duplicate keys (later entries take precedence in os/exec).
func (opts BuildOpts) Env() []string {
	env := []string{}

	if opts.CGoEnabled {
		env = append(env, "CGO_ENABLED=1")
	}
	if opts.GoOS != "" {
		env = append(env, fmt.Sprintf("GOOS=%s", opts.GoOS))
	}
	if opts.GoArch != "" {
		env = append(env, fmt.Sprintf("GOARCH=%s", opts.GoArch))
	}
	if opts.CC != "" {
		env = append(env, fmt.Sprintf("CC=%s", opts.CC))
	}
	if opts.CGoCFlags != "" {
		env = append(env, fmt.Sprintf("CGO_CFLAGS=%s", opts.CGoCFlags))
	}
	if opts.GoArm != "" {
		env = append(env, fmt.Sprintf("GOARM=%s", opts.GoArm))
	}
	// Go386 was declared on BuildOpts but never emitted; honor it when set.
	if opts.Go386 != "" {
		env = append(env, fmt.Sprintf("GO386=%s", opts.Go386))
	}
	// NOTE(review): LibC still has no corresponding environment variable —
	// confirm whether it should map to one or is consumed elsewhere.

	if opts.ExtraEnv != nil {
		return append(opts.ExtraEnv, env...)
	}

	return env
}
// Args constructs the flag list for "go build" (via exec.Command), always
// ending with the package path.
func (opts BuildOpts) Args() []string {
	var flags []string
	if opts.LdFlags != nil {
		flags = append(flags, "-ldflags", strings.Join(opts.LdFlags, " "))
	}
	if opts.Output != "" {
		flags = append(flags, "-o", opts.Output)
	}
	// Appending a nil ExtraArgs is a no-op, so no explicit nil check is needed.
	flags = append(flags, opts.ExtraArgs...)
	return append(flags, opts.Package)
}
// RunBuild runs the "go build" process in the current shell given the opts,
// wiring the command's standard streams and working directory from opts.
func RunBuild(ctx context.Context, opts BuildOpts) error {
	args := append([]string{"build"}, opts.Args()...)
	// Ignore gosec G304 as this function is only used in the build process.
	//nolint:gosec
	cmd := exec.CommandContext(ctx, "go", args...)
	cmd.Env = opts.Env()
	cmd.Stdout = opts.Stdout
	cmd.Stderr = opts.Stderr
	cmd.Stdin = opts.Stdin
	cmd.Dir = opts.Workdir
	return cmd.Run()
}

View File

@@ -1,2 +0,0 @@
// Package golangutils holds utility functions, wrappers, and types for building Go binaries for Grafana.
package golangutils

View File

@@ -1,73 +0,0 @@
package gpg
import (
"encoding/base64"
"fmt"
"log"
"os"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/fsutil"
)
// LoadGPGKeys loads GPG key pair and password from the environment and writes them to corresponding files.
//
// The key material is expected base64-encoded in $GPG_PRIV_KEY and $GPG_PUB_KEY,
// and the passphrase plain-text in $GPG_KEY_PASSWORD.
// The passed config's GPG fields also get updated. Make sure to call RemoveGPGFiles at application exit.
func LoadGPGKeys(cfg *config.Config) error {
	var err error
	// Allocate the temp files first; their paths are recorded on cfg so that
	// RemoveGPGFiles can clean them up later.
	cfg.GPGPrivateKey, err = fsutil.CreateTempFile("priv.key")
	if err != nil {
		return err
	}
	cfg.GPGPublicKey, err = fsutil.CreateTempFile("pub.key")
	if err != nil {
		return err
	}
	cfg.GPGPassPath, err = fsutil.CreateTempFile("")
	if err != nil {
		return err
	}
	gpgPrivKey := os.Getenv("GPG_PRIV_KEY")
	if gpgPrivKey == "" {
		return fmt.Errorf("$GPG_PRIV_KEY must be defined")
	}
	gpgPubKey := os.Getenv("GPG_PUB_KEY")
	if gpgPubKey == "" {
		return fmt.Errorf("$GPG_PUB_KEY must be defined")
	}
	gpgPass := os.Getenv("GPG_KEY_PASSWORD")
	if gpgPass == "" {
		return fmt.Errorf("$GPG_KEY_PASSWORD must be defined")
	}
	gpgPrivKeyB, err := base64.StdEncoding.DecodeString(gpgPrivKey)
	if err != nil {
		return fmt.Errorf("couldn't decode $GPG_PRIV_KEY: %w", err)
	}
	gpgPubKeyB, err := base64.StdEncoding.DecodeString(gpgPubKey)
	if err != nil {
		return fmt.Errorf("couldn't decode $GPG_PUB_KEY: %w", err)
	}
	// 0400: the keys and passphrase are secrets — owner read-only.
	if err := os.WriteFile(cfg.GPGPrivateKey, append(gpgPrivKeyB, '\n'), 0400); err != nil {
		return fmt.Errorf("failed to write GPG private key file: %w", err)
	}
	if err := os.WriteFile(cfg.GPGPublicKey, append(gpgPubKeyB, '\n'), 0400); err != nil {
		return fmt.Errorf("failed to write GPG public key file: %w", err)
	}
	if err := os.WriteFile(cfg.GPGPassPath, []byte(gpgPass+"\n"), 0400); err != nil {
		return fmt.Errorf("failed to write GPG password file: %w", err)
	}
	return nil
}
// RemoveGPGFiles removes configured GPG files.
// Individual failures are logged and do not stop removal of the remaining files.
func RemoveGPGFiles(cfg config.Config) {
	files := []string{cfg.GPGPrivateKey, cfg.GPGPublicKey, cfg.GPGPassPath}
	for _, file := range files {
		if err := os.Remove(file); err != nil {
			log.Printf("failed to remove %q", file)
		}
	}
}

View File

@@ -1,73 +0,0 @@
package gpg
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/fsutil"
)
// writeRpmMacros writes ~/.rpmmacros.
func writeRpmMacros(homeDir, gpgPassPath string) error {
fpath := filepath.Join(homeDir, ".rpmmacros")
content := fmt.Sprintf(`%%_signature gpg
%%_gpg_path %s/.gnupg
%%_gpg_name Grafana
%%_gpgbin /usr/bin/gpg
%%__gpg_sign_cmd %%{__gpg} gpg --batch --yes --pinentry-mode loopback --no-armor --passphrase-file %s --no-secmem-warning -u "%%{_gpg_name}" -sbo %%{__signature_filename} %%{__plaintext_filename}
`, homeDir, gpgPassPath)
//nolint:gosec
if err := os.WriteFile(fpath, []byte(content), 0600); err != nil {
return fmt.Errorf("failed to write %q: %w", fpath, err)
}
return nil
}
// Import imports the GPG package signing key.
// ~/.rpmmacros also gets written.
func Import(cfg config.Config) error {
	exists, err := fsutil.Exists(cfg.GPGPrivateKey)
	if err != nil {
		return err
	}
	if !exists {
		return fmt.Errorf("GPG private key file doesn't exist: %q", cfg.GPGPrivateKey)
	}
	log.Printf("Importing GPG key %q...", cfg.GPGPrivateKey)
	// nolint:gosec
	cmd := exec.Command("gpg", "--batch", "--yes", "--no-tty", "--allow-secret-key-import", "--import",
		cfg.GPGPrivateKey)
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to import private key: %s", output)
	}
	homeDir, err := os.UserHomeDir()
	if err != nil {
		return err
	}
	// ~/.rpmmacros configures rpm's signing macros (see writeRpmMacros).
	if err := writeRpmMacros(homeDir, cfg.GPGPassPath); err != nil {
		return err
	}
	// Install the public key where rpm's keyring expects it.
	pubKeysPath := filepath.Join(homeDir, ".rpmdb", "pubkeys")
	if err := os.MkdirAll(pubKeysPath, 0700); err != nil {
		return fmt.Errorf("failed to make %s: %w", pubKeysPath, err)
	}
	gpgPub, err := os.ReadFile(cfg.GPGPublicKey)
	if err != nil {
		return err
	}
	//nolint:gosec
	if err := os.WriteFile(filepath.Join(homeDir, ".rpmdb", "pubkeys", "grafana.key"), gpgPub, 0400); err != nil {
		return fmt.Errorf("failed to write pub key to ~/.rpmdb: %w", err)
	}
	return nil
}

View File

@@ -1,129 +0,0 @@
package grafana
import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/cryptoutil"
"github.com/grafana/grafana/pkg/build/golangutils"
)
var binaries = []string{"grafana", "grafana-server", "grafana-cli"}
const (
SuffixEnterprise2 = "-enterprise2"
)
const (
ExtensionExe = ".exe"
)
// GrafanaLDFlags returns the linker flags that stamp version and build
// metadata into the Grafana binaries ("-w" omits DWARF debug info).
func GrafanaLDFlags(version string, r config.Revision) []string {
	flags := []string{"-w"}
	flags = append(flags,
		fmt.Sprintf("-X main.version=%s", version),
		fmt.Sprintf("-X main.commit=%s", r.SHA256),
		fmt.Sprintf("-X main.buildstamp=%d", r.Timestamp),
		fmt.Sprintf("-X main.buildBranch=%s", r.Branch),
	)
	// Enterprise builds additionally record the enterprise repo commit.
	if r.EnterpriseCommit != "" {
		flags = append(flags, fmt.Sprintf("-X main.enterpriseCommit=%s", r.EnterpriseCommit))
	}
	return flags
}
// BinaryFolder returns the path to where the Grafana binary is built given the
// provided arguments. The layout is "<os>-<arch>[-<libc>][-enterprise2][.exe]",
// where ARM targets embed the ARM revision (e.g. "armv7").
func BinaryFolder(edition config.Edition, args BuildArgs) string {
	arch := string(args.GoArch)
	if args.GoArch == config.ArchARM {
		arch += "v" + args.GoArm
	}
	name := fmt.Sprintf("%s-%s", args.GoOS, arch)
	if args.LibC != "" {
		name += fmt.Sprintf("-%s", args.LibC)
	}
	if edition == config.EditionEnterprise2 {
		name += SuffixEnterprise2
	}
	if args.GoOS == config.OSWindows {
		name += ExtensionExe
	}
	return name
}
// GrafanaDescriptor renders a human-readable "<os>/<arch>[/<libc>]" label for
// logging which build target is being produced.
func GrafanaDescriptor(opts golangutils.BuildOpts) string {
	arch := string(opts.GoArch)
	if opts.GoArch == config.ArchARM {
		arch += "v" + opts.GoArm
	}
	libcPart := ""
	if opts.LibC != "" {
		libcPart = fmt.Sprintf("/%s", opts.LibC)
	}
	return fmt.Sprintf("%s/%s%s", opts.GoOS, arch, libcPart)
}
// BuildGrafanaBinary builds a certain binary according to certain parameters.
// The binary is written to <workdir>/bin/<variant-folder>/<name>, any stale
// binary and .md5 file are removed first, and an MD5 checksum is produced
// alongside the fresh binary.
func BuildGrafanaBinary(ctx context.Context, name, version string, args BuildArgs, edition config.Edition) error {
	opts := args.BuildOpts
	// Inherit the caller's environment; Env() appends the cross-compile vars.
	opts.ExtraEnv = os.Environ()
	revision, err := config.GrafanaRevision(ctx, opts.Workdir)
	if err != nil {
		return err
	}
	folder := BinaryFolder(edition, args)
	if opts.GoOS == config.OSWindows {
		name += ExtensionExe
	}
	binary := filepath.Join(opts.Workdir, "bin", folder, name)
	opts.Output = binary
	// Remove stale outputs so a failed build can't leave old artifacts behind.
	if err := os.RemoveAll(binary); err != nil {
		return fmt.Errorf("failed to remove %q: %w", binary, err)
	}
	if err := os.RemoveAll(binary + ".md5"); err != nil {
		return fmt.Errorf("failed to remove %q: %w", binary+".md5", err)
	}
	descriptor := GrafanaDescriptor(opts)
	log.Printf("Building %q for %s", binary, descriptor)
	// Variant ldflags come first, then the version/commit stamping flags.
	opts.LdFlags = append(args.LdFlags, GrafanaLDFlags(version, revision)...)
	if edition == config.EditionEnterprise2 {
		opts.ExtraArgs = []string{"-tags=pro"}
	}
	log.Printf("Running command 'go %s'", opts.Args())
	if err := golangutils.RunBuild(ctx, opts); err != nil {
		return err
	}
	// Create an MD5 checksum of the binary, to be included in the archive for
	// automatic upgrades.
	if err := cryptoutil.MD5File(binary); err != nil {
		return err
	}
	return nil
}

View File

@@ -1,160 +1 @@
package grafana
import (
"bytes"
"context"
"fmt"
"path/filepath"
"github.com/grafana/grafana/pkg/build/compilers"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/golangutils"
)
// BuildArgs represent the build parameters that define the "go build" behavior of a single variant.
// These arguments are applied as environment variables and arguments to the "go build" command.
type BuildArgs struct {
golangutils.BuildOpts
DebArch config.Architecture
RPMArch config.Architecture
}
type BuildVariantOpts struct {
Variant config.Variant
Edition config.Edition
Version string
GrafanaDir string
}
// BuildVariant builds a certain variant of the grafana-server and grafana-cli binaries sequentially.
// Build output is captured per binary and included in the error message on failure.
func BuildVariant(ctx context.Context, opts BuildVariantOpts) error {
	grafanaDir, err := filepath.Abs(opts.GrafanaDir)
	if err != nil {
		return err
	}
	var (
		args = VariantBuildArgs(opts.Variant)
	)
	for _, binary := range binaries {
		// Note that for Golang cmd paths we must use the relative path and the Linux file separators (/) even for Windows users.
		var (
			pkg    = fmt.Sprintf("./pkg/cmd/%s", binary)
			stdout = bytes.NewBuffer(nil)
			stderr = bytes.NewBuffer(nil)
		)
		args.Workdir = grafanaDir
		args.Stdout = stdout
		args.Stderr = stderr
		args.Package = pkg
		if err := BuildGrafanaBinary(ctx, binary, opts.Version, args, opts.Edition); err != nil {
			return fmt.Errorf("failed to build %s for %s: %w\nstdout: %s\nstderr: %s", pkg, opts.Variant, err, stdout.String(), stderr.String())
		}
	}
	return nil
}
// ldFlagsStatic forces external linking with a fully static binary, used by
// the musl-based variants below.
var ldFlagsStatic = []string{"-linkmode=external", "-extldflags=-static"}

// variantArgs maps each build variant to its cross-compilation settings:
// target OS/arch, C cross-compiler, libc flavor, and the deb/rpm package
// architecture names where packages are produced for that variant.
var variantArgs = map[config.Variant]BuildArgs{
	config.VariantArmV6: {
		BuildOpts: golangutils.BuildOpts{
			GoOS:       config.OSLinux,
			CGoEnabled: true,
			GoArch:     config.ArchARM,
			GoArm:      "6",
			CC:         compilers.ArmV6,
		},
		DebArch: config.ArchARMHF,
	},
	config.VariantArmV7: {
		BuildOpts: golangutils.BuildOpts{
			GoOS:       config.OSLinux,
			CGoEnabled: true,
			GoArch:     config.ArchARM,
			GoArm:      "7",
			CC:         compilers.Armv7,
		},
		DebArch: config.ArchARMHF,
		RPMArch: config.ArchARMHFP,
	},
	config.VariantArmV7Musl: {
		BuildOpts: golangutils.BuildOpts{
			GoOS:       config.OSLinux,
			CGoEnabled: true,
			GoArch:     config.ArchARM,
			GoArm:      "7",
			LibC:       config.LibCMusl,
			CC:         compilers.Armv7Musl,
			LdFlags:    ldFlagsStatic,
		},
	},
	config.VariantArm64: {
		BuildOpts: golangutils.BuildOpts{
			GoOS:       config.OSLinux,
			CGoEnabled: true,
			GoArch:     config.ArchARM64,
			CC:         compilers.Arm64,
		},
		DebArch: config.ArchARM64,
		RPMArch: "aarch64",
	},
	config.VariantArm64Musl: {
		BuildOpts: golangutils.BuildOpts{
			GoOS:       config.OSLinux,
			GoArch:     config.ArchARM64,
			CGoEnabled: true,
			CC:         compilers.Arm64Musl,
			LibC:       config.LibCMusl,
			LdFlags:    ldFlagsStatic,
		},
	},
	config.VariantDarwinAmd64: {
		BuildOpts: golangutils.BuildOpts{
			GoOS:       config.OSDarwin,
			CGoEnabled: true,
			GoArch:     config.ArchAMD64,
			CC:         compilers.Osx64,
		},
	},
	config.VariantWindowsAmd64: {
		BuildOpts: golangutils.BuildOpts{
			GoOS:       config.OSWindows,
			GoArch:     config.ArchAMD64,
			CC:         compilers.Win64,
			CGoEnabled: true,
			// Target Windows 7 / Server 2008 R2 (0x0601) APIs.
			CGoCFlags: "-D_WIN32_WINNT=0x0601",
		},
	},
	config.VariantLinuxAmd64: {
		BuildOpts: golangutils.BuildOpts{
			GoOS:   config.OSLinux,
			GoArch: config.ArchAMD64,
			CC:     compilers.LinuxX64,
		},
		DebArch: config.ArchAMD64,
		RPMArch: config.ArchAMD64,
	},
	config.VariantLinuxAmd64Musl: {
		BuildOpts: golangutils.BuildOpts{
			GoOS:    config.OSLinux,
			GoArch:  config.ArchAMD64,
			CC:      compilers.LinuxX64Musl,
			LibC:    config.LibCMusl,
			LdFlags: ldFlagsStatic,
		},
	},
}
// VariantBuildArgs returns the BuildArgs registered for the given variant,
// or a zero-value BuildArgs when the variant is unknown.
func VariantBuildArgs(v config.Variant) BuildArgs {
	args, ok := variantArgs[v]
	if !ok {
		return BuildArgs{}
	}
	return args
}

View File

@@ -2,63 +2,13 @@ package lerna
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/fsutil"
)
// BuildFrontendPackages will bump the version for the package to the latest canary build
// and build the packages so they are ready for being published, used for generating docs etc.
// It runs "yarn run packages:build" inside grafanaDir after the version bump.
func BuildFrontendPackages(version string, mode config.Edition, grafanaDir string) error {
	err := bumpLernaVersion(version, grafanaDir)
	if err != nil {
		return err
	}
	cmd := exec.Command("yarn", "run", "packages:build")
	cmd.Dir = grafanaDir
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to build %s frontend packages: %s", mode, output)
	}
	return nil
}
// bumpLernaVersion sets every workspace package to the exact given version via
// "yarn lerna version", without creating git tags or pushing.
func bumpLernaVersion(version string, grafanaDir string) error {
	//nolint:gosec
	cmd := exec.Command("yarn", "run", "lerna", "version", version, "--exact", "--no-git-tag-version", "--no-push", "--force-publish", "-y")
	cmd.Dir = grafanaDir
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to bump version for frontend packages: %s\n%s", err, output)
	}
	return nil
}
func GetLernaVersion(grafanaDir string) (string, error) {
lernaJSONPath := filepath.Join(grafanaDir, "lerna.json")
//nolint:gosec
lernaJSONB, err := os.ReadFile(lernaJSONPath)
if err != nil {
return "", fmt.Errorf("failed to read %q: %w", lernaJSONPath, err)
}
pkgObj := map[string]any{}
if err := json.Unmarshal(lernaJSONB, &pkgObj); err != nil {
return "", fmt.Errorf("failed decoding %q: %w", lernaJSONPath, err)
}
version := pkgObj["version"].(string)
if version == "" {
return "", fmt.Errorf("failed to read version from %q", lernaJSONPath)
}
return strings.TrimSpace(version), nil
}
func PackFrontendPackages(ctx context.Context, tag, grafanaDir, artifactsDir string) error {
exists, err := fsutil.Exists(artifactsDir)
if err != nil {

View File

@@ -1,9 +1,6 @@
package packaging
import (
"fmt"
"strings"
"github.com/grafana/grafana/pkg/build/config"
)
@@ -12,11 +9,18 @@ const MainFolder = "main"
const EnterpriseSfx = "-enterprise"
const CacheSettings = "Cache-Control:public, max-age="
type buildArtifact struct {
Os string
Arch string
urlPostfix string
packagePostfix string
type BuildArtifact struct {
// Distro can be "windows", "darwin", "deb", "rhel", or "linux"
Distro string
Arch string
// Ext is the file extension without the "."
Ext string
Musl bool
RaspberryPi bool
// URL can be set optionally by another process
// Note: check other repos before determining this to be dead code
URL string
}
type PublishConfig struct {
@@ -32,110 +36,101 @@ type PublishConfig struct {
SimulateRelease bool
}
const rhelOS = "rhel"
const debOS = "deb"

// GetURL builds the download URL for this artifact under baseArchiveURL.
// Debian packages join name and version with "_" instead of "-", and RPMs
// append the "-1" release suffix. Versions may contain at most one hyphen
// (a single pre-release component); anything else panics.
func (t buildArtifact) GetURL(baseArchiveURL string, cfg PublishConfig) string {
	rev := ""
	prefix := "-"
	if t.Os == debOS {
		prefix = "_"
	} else if t.Os == rhelOS {
		rev = "-1"
	}
	version := cfg.Version
	verComponents := strings.Split(version, "-")
	if len(verComponents) > 2 {
		panic(fmt.Sprintf("Version string contains more than one hyphen: %q", version))
	}
	switch t.Os {
	case debOS, rhelOS:
		if len(verComponents) > 1 {
			// With Debian and RPM packages, it's customary to prefix any pre-release component with a ~, since this
			// is considered of lower lexical value than the empty character, and this way pre-release versions are
			// considered to be of a lower version than the final version (which lacks this suffix).
			version = fmt.Sprintf("%s~%s", verComponents[0], verComponents[1])
		}
	}
	// https://dl.grafana.com/oss/main/grafana_8.5.0~54094pre_armhf.deb: 404 Not Found
	url := fmt.Sprintf("%s%s%s%s%s%s", baseArchiveURL, t.packagePostfix, prefix, version, rev, t.urlPostfix)
	return url
}
var ArtifactConfigs = []buildArtifact{
var LinuxArtifacts = []BuildArtifact{
{
Os: debOS,
Arch: "arm64",
urlPostfix: "_arm64.deb",
Distro: "linux",
Arch: "arm64",
Ext: "tar.gz",
},
{
Os: rhelOS,
Arch: "arm64",
urlPostfix: ".aarch64.rpm",
Distro: "deb",
Arch: "amd64",
Ext: "deb",
},
{
Os: "linux",
Arch: "arm64",
urlPostfix: ".linux-arm64.tar.gz",
},
// https://github.com/golang/go/issues/58425 disabling arm builds until go issue is resolved
// {
// Os: debOS,
// Arch: "armv7",
// urlPostfix: "_armhf.deb",
// },
// {
// Os: debOS,
// Arch: "armv6",
// packagePostfix: "-rpi",
// urlPostfix: "_armhf.deb",
// },
// {
// Os: rhelOS,
// Arch: "armv7",
// urlPostfix: ".armhfp.rpm",
// },
// {
// Os: "linux",
// Arch: "armv6",
// urlPostfix: ".linux-armv6.tar.gz",
// },
// {
// Os: "linux",
// Arch: "armv7",
// urlPostfix: ".linux-armv7.tar.gz",
// },
{
Os: "darwin",
Arch: "amd64",
urlPostfix: ".darwin-amd64.tar.gz",
Distro: "rhel",
Arch: "x86_64",
Ext: "rpm",
},
{
Os: "deb",
Arch: "amd64",
urlPostfix: "_amd64.deb",
},
{
Os: rhelOS,
Arch: "amd64",
urlPostfix: ".x86_64.rpm",
},
{
Os: "linux",
Arch: "amd64",
urlPostfix: ".linux-amd64.tar.gz",
},
{
Os: "win",
Arch: "amd64",
urlPostfix: ".windows-amd64.zip",
},
{
Os: "win-installer",
Arch: "amd64",
urlPostfix: ".windows-amd64.msi",
Distro: "linux",
Arch: "amd64",
Ext: "tar.gz",
},
}
var DarwinArtifacts = []BuildArtifact{
{
Distro: "darwin",
Arch: "amd64",
Ext: "tar.gz",
},
}
var WindowsArtifacts = []BuildArtifact{
{
Distro: "windows",
Arch: "amd64",
Ext: "zip",
},
{
Distro: "windows",
Arch: "amd64",
Ext: "msi",
},
}
var ARMArtifacts = []BuildArtifact{
{
Distro: "deb",
Arch: "arm64",
Ext: "deb",
},
{
Distro: "rhel",
Arch: "aarch64",
Ext: "rpm",
},
{
Distro: "deb",
Arch: "armhf",
Ext: "deb",
RaspberryPi: false,
},
{
Distro: "deb",
Arch: "armhf",
RaspberryPi: true,
Ext: "deb",
},
{
Distro: "linux",
Arch: "armv6",
Ext: "tar.gz",
},
{
Distro: "linux",
Arch: "armv7",
Ext: "tar.gz",
},
{
Distro: "linux",
Arch: "arm64",
Ext: "tar.gz",
},
{
Distro: "linux",
Arch: "amd64",
Ext: "tar.gz",
},
}
// join concatenates any number of BuildArtifact slices onto a and returns the
// combined slice.
func join(a []BuildArtifact, b ...[]BuildArtifact) []BuildArtifact {
	for _, extra := range b {
		a = append(a, extra...)
	}
	return a
}
var ArtifactConfigs = join(LinuxArtifacts, DarwinArtifacts, WindowsArtifacts, ARMArtifacts)

View File

@@ -1,2 +0,0 @@
// Package packaging holds functions and types for creating the tar.gz, deb, and rpm packages of Grafana.
package packaging

View File

@@ -1 +0,0 @@
package packaging

File diff suppressed because it is too large Load Diff

View File

@@ -1,23 +0,0 @@
package packaging_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/packaging"
)
// TestPackageRegexp checks that the enterprise2 package regexp matches both
// the release archive name and its .sha256 checksum file.
func TestPackageRegexp(t *testing.T) {
	t.Run("It should match enterprise2 packages", func(t *testing.T) {
		rgx := packaging.PackageRegexp(config.EditionEnterprise2)
		matches := []string{
			"grafana-enterprise2-1.2.3-4567pre.linux-amd64.tar.gz",
			"grafana-enterprise2-1.2.3-4567pre.linux-amd64.tar.gz.sha256",
		}
		for _, v := range matches {
			assert.Truef(t, rgx.MatchString(v), "'%s' should match regex '%s'", v, rgx.String())
		}
	})
}

View File

@@ -1,66 +0,0 @@
package plugins
import (
"context"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/errutil"
"github.com/grafana/grafana/pkg/build/fsutil"
"github.com/grafana/grafana/pkg/build/syncutil"
)
type PluginSigningMode = int
// Build builds internal plugins concurrently via the worker pool.
// The built plugins are placed in plugins-bundled/dist/.
// When plugin signing is enabled in verMode, a signed MANIFEST.txt is also
// generated for each plugin.
func Build(ctx context.Context, grafanaDir string, p syncutil.WorkerPool, g *errutil.Group, verMode *config.BuildConfig) error {
	log.Printf("Building plugins in %q...", grafanaDir)
	root := filepath.Join(grafanaDir, "plugins-bundled", "internal")
	fis, err := os.ReadDir(root)
	if err != nil {
		return err
	}
	for i := range fis {
		// Capture per-iteration values before scheduling the closure.
		fi := fis[i]
		if !fi.IsDir() {
			continue
		}
		dpath := filepath.Join(root, fi.Name())
		p.Schedule(g.Wrap(func() error {
			log.Printf("Building plugin %q...", dpath)
			cmd := exec.Command("yarn", "build")
			cmd.Dir = dpath
			if output, err := cmd.CombinedOutput(); err != nil {
				return fmt.Errorf("yarn build failed: %s", output)
			}
			dstPath := filepath.Join("plugins-bundled", "dist", fi.Name())
			if err := fsutil.CopyRecursive(filepath.Join(dpath, "dist"), dstPath); err != nil {
				return err
			}
			if !verMode.PluginSignature.Sign {
				return nil
			}
			return BuildManifest(ctx, dstPath, verMode.PluginSignature.AdminSign)
		}))
	}
	// Wait for all scheduled plugin builds before declaring success.
	if err := g.Wait(); err != nil {
		return err
	}
	log.Printf("Built all plug-ins successfully!")
	return nil
}

View File

@@ -1,118 +0,0 @@
package plugins
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"path/filepath"
"github.com/grafana/grafana/pkg/build/errutil"
"github.com/grafana/grafana/pkg/build/syncutil"
)
// logCloseError executes the closeFunc; if it returns an error, it is logged by the log package.
func logCloseError(closeFunc func() error) {
	if err := closeFunc(); err != nil {
		log.Println(err)
	}
}

// logError logs err via the log package when it is non-nil.
func logError(err error) {
	if err != nil {
		log.Println(err)
	}
}
// pluginManifest has details of an external plugin package.
type pluginManifest struct {
	Name    string `json:"name"`
	Version string `json:"version"`
	// Checksum is the expected SHA-256 hex digest of the plugin archive,
	// verified after download.
	Checksum string `json:"checksum"`
}

// pluginsManifest represents a manifest of Grafana's external plugins.
type pluginsManifest struct {
	Plugins []pluginManifest `json:"plugins"`
}
// Download downloads Grafana plugins that should be bundled into packages,
// as listed in <grafanaDir>/plugins-bundled/external.json.
//
// The plugin archives are downloaded into <grafanaDir>/plugins-bundled,
// verified against their SHA-256 checksums, and unzipped there.
func Download(ctx context.Context, grafanaDir string, p syncutil.WorkerPool) error {
	g, _ := errutil.GroupWithContext(ctx)
	log.Println("Downloading external plugins...")
	var m pluginsManifest
	manifestPath := filepath.Join(grafanaDir, "plugins-bundled", "external.json")
	//nolint:gosec
	manifestB, err := os.ReadFile(manifestPath)
	if err != nil {
		return fmt.Errorf("failed to open plugins manifest %q: %w", manifestPath, err)
	}
	if err := json.Unmarshal(manifestB, &m); err != nil {
		return err
	}
	for i := range m.Plugins {
		// Capture the element before scheduling the closure.
		pm := m.Plugins[i]
		p.Schedule(g.Wrap(func() error {
			tgt := filepath.Join(grafanaDir, "plugins-bundled", fmt.Sprintf("%s-%s.zip", pm.Name, pm.Version))
			//nolint:gosec
			out, err := os.Create(tgt)
			if err != nil {
				return err
			}
			defer logCloseError(out.Close)
			// NOTE(review): plain http; integrity is enforced by the SHA-256
			// comparison below rather than by the transport.
			u := fmt.Sprintf("http://storage.googleapis.com/plugins-ci/plugins/%s/%s-%s.zip", pm.Name, pm.Name,
				pm.Version)
			log.Printf("Downloading plugin %q to %q...", u, tgt)
			// nolint:gosec
			resp, err := http.Get(u)
			if err != nil {
				return fmt.Errorf("downloading %q failed: %w", u, err)
			}
			defer logError(resp.Body.Close())
			if resp.StatusCode != http.StatusOK {
				return fmt.Errorf("failed to download %q, status code %d", u, resp.StatusCode)
			}
			if _, err := io.Copy(out, resp.Body); err != nil {
				return fmt.Errorf("downloading %q failed: %w", u, err)
			}
			// Explicit close (in addition to the deferred one) so the file is
			// fully flushed before it is re-opened for hashing.
			if err := out.Close(); err != nil {
				return fmt.Errorf("downloading %q failed: %w", u, err)
			}
			//nolint:gosec
			fd, err := os.Open(tgt)
			if err != nil {
				return err
			}
			defer logCloseError(fd.Close)
			h := sha256.New()
			if _, err := io.Copy(h, fd); err != nil {
				return err
			}
			chksum := hex.EncodeToString(h.Sum(nil))
			if chksum != pm.Checksum {
				return fmt.Errorf("plugin %q has bad checksum: %s (expected %s)", u, chksum, pm.Checksum)
			}
			return Unzip(tgt, filepath.Join(grafanaDir, "plugins-bundled"))
		}))
	}
	return g.Wait()
}

View File

@@ -1,204 +0,0 @@
package plugins
import (
"bytes"
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"path/filepath"
)
type manifest struct {
Plugin string `json:"plugin"`
Version string `json:"version"`
Files map[string]string `json:"files"`
}
func getManifest(dpath string, chksums map[string]string) (manifest, error) {
m := manifest{}
type pluginInfo struct {
Version string `json:"version"`
}
type plugin struct {
ID string `json:"id"`
Info pluginInfo `json:"info"`
}
//nolint:gosec
f, err := os.Open(filepath.Join(dpath, "plugin.json"))
if err != nil {
return m, err
}
decoder := json.NewDecoder(f)
var p plugin
if err := decoder.Decode(&p); err != nil {
return m, err
}
if p.ID == "" {
return m, fmt.Errorf("plugin.json doesn't define id")
}
if p.Info.Version == "" {
return m, fmt.Errorf("plugin.json doesn't define info.version")
}
return manifest{
Plugin: p.ID,
Version: p.Info.Version,
Files: chksums,
}, nil
}
// BuildManifest requests a plugin's signed manifest file from the Grafana API
// and writes it to <dpath>/MANIFEST.txt.
// If signingAdmin is true, the manifest signing admin endpoint (without plugin ID) will be used, and requires
// an admin API key. $GRAFANA_API_KEY must be set in either case.
func BuildManifest(ctx context.Context, dpath string, signingAdmin bool) error {
	log.Printf("Building manifest for plug-in at %q", dpath)
	apiKey := os.Getenv("GRAFANA_API_KEY")
	if apiKey == "" {
		return fmt.Errorf("GRAFANA_API_KEY must be set")
	}
	manifestPath := filepath.Join(dpath, "MANIFEST.txt")
	chksums, err := getChksums(dpath, manifestPath)
	if err != nil {
		return err
	}
	m, err := getManifest(dpath, chksums)
	if err != nil {
		return err
	}
	b := bytes.NewBuffer(nil)
	encoder := json.NewEncoder(b)
	if err := encoder.Encode(&m); err != nil {
		return err
	}
	jsonB := b.Bytes()
	u := "https://grafana.com/api/plugins/ci/sign"
	if !signingAdmin {
		u = fmt.Sprintf("https://grafana.com/api/plugins/%s/ci/sign", m.Plugin)
	}
	log.Printf("Requesting signed manifest from Grafana API...")
	req, err := http.NewRequestWithContext(ctx, "POST", u, bytes.NewReader(jsonB))
	if err != nil {
		return err
	}
	req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", apiKey))
	req.Header.Add("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to get signed manifest from Grafana API: %w", err)
	}
	defer func() {
		// NOTE(review): %w is an fmt verb and has no effect in log.Println;
		// this logs the literal string plus the error value.
		if err := resp.Body.Close(); err != nil {
			log.Println("failed to close response body, err: %w", err)
		}
	}()
	if resp.StatusCode != 200 {
		// Best effort: include the response body in the error for debugging.
		msg, err := io.ReadAll(resp.Body)
		if err != nil {
			log.Printf("Failed to read response body: %s", err)
			msg = []byte("")
		}
		return fmt.Errorf("request for signed manifest failed with status code %d: %s", resp.StatusCode, string(msg))
	}
	log.Printf("Successfully signed manifest via Grafana API, writing to %q", manifestPath)
	//nolint:gosec
	f, err := os.Create(manifestPath)
	if err != nil {
		return fmt.Errorf("failed to create %s: %w", manifestPath, err)
	}
	defer func() {
		if err := f.Close(); err != nil {
			log.Println("failed to close file, err: %w", err)
		}
	}()
	if _, err := io.Copy(f, resp.Body); err != nil {
		return fmt.Errorf("failed to write %s: %w", manifestPath, err)
	}
	// Explicit close (in addition to the deferred one) so write errors at
	// close time are surfaced to the caller.
	if err := f.Close(); err != nil {
		return fmt.Errorf("failed to write %s: %w", manifestPath, err)
	}
	return nil
}
// getChksums walks dpath and returns a map of plugin-relative file path to
// SHA-256 hex digest, for inclusion in the signed MANIFEST.txt.
// The manifest file itself is excluded, symlinks resolving to directories are
// skipped, and symlinks that resolve outside the plugin directory are rejected.
func getChksums(dpath, manifestPath string) (map[string]string, error) {
	manifestPath = filepath.Clean(manifestPath)
	cleanRoot := filepath.Clean(dpath)
	chksums := map[string]string{}
	if err := filepath.Walk(dpath, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if fi.IsDir() {
			return nil
		}
		path = filepath.Clean(path)
		// Handle symbolic links
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			finalPath, err := filepath.EvalSymlinks(path)
			if err != nil {
				return err
			}
			log.Printf("Handling symlink %q, pointing to %q", path, finalPath)
			info, err := os.Stat(finalPath)
			if err != nil {
				return err
			}
			if info.IsDir() {
				return nil
			}
			// filepath.Rel reports a target outside the plugin directory as a
			// "../..." relative path, not as an error — so the previous
			// err-only check never fired. Inspect the relative path instead.
			rel, err := filepath.Rel(cleanRoot, finalPath)
			sep := string(os.PathSeparator)
			if err != nil || rel == ".." || (len(rel) >= 3 && rel[:3] == ".."+sep) {
				return fmt.Errorf("symbolic link %q targets a file outside of the plugin directory: %q", path, finalPath)
			}
			if finalPath == manifestPath {
				return nil
			}
		}
		if path == manifestPath {
			return nil
		}
		h := sha256.New()
		//nolint:gosec
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer logCloseError(f.Close)
		if _, err := io.Copy(h, f); err != nil {
			return err
		}
		relPath, err := filepath.Rel(dpath, path)
		if err != nil {
			return err
		}
		chksums[relPath] = fmt.Sprintf("%x", h.Sum(nil))
		return nil
	}); err != nil {
		return nil, err
	}
	return chksums, nil
}

View File

@@ -1,64 +0,0 @@
package plugins
import (
"archive/zip"
"io"
"log"
"os"
"path/filepath"
)
// Unzip unzips a plugin archive at fpath into tgtDir, recreating the
// archive's directory structure and file modes.
func Unzip(fpath, tgtDir string) error {
	log.Printf("Unzipping plugin %q into %q...", fpath, tgtDir)
	r, err := zip.OpenReader(fpath)
	if err != nil {
		return err
	}
	defer logCloseError(r.Close)
	// Closure to address file descriptors issue with all the deferred .Close() methods
	extractAndWriteFile := func(f *zip.File) error {
		log.Printf("Extracting zip member %q...", f.Name)
		rc, err := f.Open()
		if err != nil {
			return err
		}
		defer logCloseError(rc.Close)
		// NOTE(review): f.Name is joined into tgtDir without a containment
		// check, so an archive entry like "../x" could escape tgtDir ("zip
		// slip"). The archives extracted here are checksum-verified first
		// (see Download), but a filepath.Rel/prefix check on dstPath would
		// harden this.
		//nolint:gosec
		dstPath := filepath.Join(tgtDir, f.Name)
		if f.FileInfo().IsDir() {
			return os.MkdirAll(dstPath, f.Mode())
		}
		if err := os.MkdirAll(filepath.Dir(dstPath), f.Mode()); err != nil {
			return err
		}
		//nolint:gosec
		fd, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
		if err != nil {
			return err
		}
		defer logCloseError(fd.Close)
		// NOTE(review): io.Copy from a zip member is unbounded
		// (decompression-bomb risk for untrusted archives).
		// nolint:gosec
		if _, err := io.Copy(fd, rc); err != nil {
			return err
		}
		return fd.Close()
	}
	for _, f := range r.File {
		if err := extractAndWriteFile(f); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -1,10 +0,0 @@
package stringutil
// Contains reports whether s is an element of arr.
func Contains(arr []string, s string) bool {
	found := false
	for i := 0; i < len(arr) && !found; i++ {
		found = arr[i] == s
	}
	return found
}

View File

@@ -1,43 +0,0 @@
package syncutil
import (
"log"
"runtime"
)
// worker drains the jobs channel, executing each job in turn until the
// channel is closed.
func worker(jobs chan func()) {
	for job := range jobs {
		job()
	}
}

// WorkerPool represents a concurrent worker pool.
type WorkerPool struct {
	NumWorkers int
	jobs       chan func()
}

// NewWorkerPool constructs a new WorkerPool.
// A non-positive numWorkers defaults to runtime.NumCPU().
func NewWorkerPool(numWorkers int) WorkerPool {
	if numWorkers <= 0 {
		numWorkers = runtime.NumCPU()
	}
	log.Printf("Creating worker pool with %d workers", numWorkers)
	// Buffered so Schedule rarely blocks; workers pull from the same queue.
	queue := make(chan func(), 100)
	for w := 0; w < numWorkers; w++ {
		go worker(queue)
	}
	return WorkerPool{NumWorkers: numWorkers, jobs: queue}
}

// Schedule schedules a job to be executed by a worker in the pool.
func (p WorkerPool) Schedule(job func()) {
	p.jobs <- job
}

// Close closes the job channel, letting idle workers exit.
func (p WorkerPool) Close() {
	close(p.jobs)
}

View File

@@ -1,9 +1,5 @@
package validation
import (
"context"
)
type ArtifactType int
const (
@@ -15,13 +11,3 @@ type Artifact struct {
Type ArtifactType
URL string
}
// ReleaseArtifacts generates a list of release artifacts
// NOTE(review): currently a stub that always returns (nil, nil).
func ReleaseArtifacts(version string) ([]Artifact, error) {
	return nil, nil
}

// VerifyRelease tests that, given the information, a release will complete wholly and successfully.
// NOTE(review): currently a stub that always returns (false, nil).
func VerifyRelease(ctx context.Context, version string) (bool, error) {
	return false, nil
}

View File

@@ -0,0 +1,31 @@
package versions
import "regexp"
// semverRegex is the canonical semver 2.0.0 pattern with named capture groups
// for each component.
var semverRegex = regexp.MustCompile(`^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`)

// Semver holds the string components of a semantic version.
type Semver struct {
	Major         string
	Minor         string
	Patch         string
	Prerelease    string
	BuildMetadata string
}

// ParseSemver splits version into its semver components.
// A version that is not valid semver yields a zero-value Semver (previously
// this panicked indexing the nil match slice).
func ParseSemver(version string) Semver {
	matches := semverRegex.FindStringSubmatch(version)
	if matches == nil {
		return Semver{}
	}
	results := make(map[string]string)
	for i, name := range semverRegex.SubexpNames() {
		if i != 0 && name != "" {
			results[name] = matches[i]
		}
	}
	return Semver{
		Major:         results["major"],
		Minor:         results["minor"],
		Patch:         results["patch"],
		Prerelease:    results["prerelease"],
		BuildMetadata: results["buildmetadata"],
	}
}

View File

@@ -13,15 +13,17 @@ import (
)
var (
reGrafanaTag = regexp.MustCompile(`^v(\d+\.\d+\.\d+$)`)
reGrafanaTagPreview = regexp.MustCompile(`^v(\d+\.\d+\.\d+-preview)`)
reGrafanaTagCustom = regexp.MustCompile(`^v(\d+\.\d+\.\d+-\w+)`)
reGrafanaTag = regexp.MustCompile(`^v(\d+\.\d+\.\d+$)`)
reGrafanaTagPreview = regexp.MustCompile(`^v(\d+\.\d+\.\d+-preview)`)
reGrafanaTagCustom = regexp.MustCompile(`^v(\d+\.\d+\.\d+-\w+)`)
reGrafanaTagSecurity = regexp.MustCompile(`^v(\d+\.\d+\.\d+\+\w+\-\d+)`)
)
const (
Latest = "latest"
Next = "next"
Test = "test"
Latest = "latest"
Next = "next"
Test = "test"
Security = "security"
)
type Version struct {
@@ -152,6 +154,11 @@ func GetVersion(tag string) (*Version, error) {
Version: reGrafanaTagCustom.FindStringSubmatch(tag)[1],
Channel: Test,
}
case reGrafanaTagSecurity.MatchString(tag):
version = Version{
Version: reGrafanaTagSecurity.FindStringSubmatch(tag)[1],
Channel: Security,
}
default:
return nil, fmt.Errorf("%s not a supported Grafana version, exitting", tag)
}

View File

@@ -124,6 +124,10 @@ func (h *ExpressionQueryReader) ReadQuery(
}
case QueryTypeSQL:
enabled := enableSqlExpressions(h)
if !enabled {
return eq, fmt.Errorf("sqlExpressions is not implemented")
}
q := &SQLExpression{}
err = iter.ReadVal(q)
if err == nil {
@@ -184,3 +188,11 @@ func getReferenceVar(exp string, refId string) (string, error) {
}
return exp, nil
}
// enableSqlExpressions reports whether SQL expression queries may be
// read. As written it returns false unconditionally: when the
// FlagSqlExpressions feature flag is globally disabled the first branch
// returns false, and when the flag is enabled the fall-through also
// returns false.
// NOTE(review): this looks like a deliberate hard-disable of SQL
// expressions (presumably the RCE/LFI security mitigation referenced in
// the commit history) rather than a bug — confirm intent before
// "fixing" the dead branch.
func enableSqlExpressions(h *ExpressionQueryReader) bool {
	// `enabled` is true when the flag is globally DISABLED (note the
	// leading negation).
	enabled := !h.features.IsEnabledGlobally(featuremgmt.FlagSqlExpressions)
	if enabled {
		return false
	}
	// Flag is enabled, but SQL expressions are still switched off.
	return false
}

26
pkg/expr/sql/db.go Normal file
View File

@@ -0,0 +1,26 @@
package sql
import (
"errors"
"github.com/grafana/grafana-plugin-sdk-go/data"
)
// DB is a stub replacement for the previous in-memory DuckDB-backed SQL
// engine (the go-duck dependency was removed elsewhere in this change).
// Every query method returns a "not implemented" error, so SQL
// expression evaluation through this type is effectively disabled.
type DB struct {
}

// TablesList would return the list of tables referenced by rawSQL.
// Stub: always returns a nil slice and a "not implemented" error.
func (db *DB) TablesList(rawSQL string) ([]string, error) {
	return nil, errors.New("not implemented")
}

// RunCommands would execute the given SQL commands and return their
// output. Stub: always returns "" and a "not implemented" error.
func (db *DB) RunCommands(commands []string) (string, error) {
	return "", errors.New("not implemented")
}

// QueryFramesInto would run query against the supplied data frames and
// write the result into f. Stub: always returns a "not implemented"
// error and leaves f untouched.
func (db *DB) QueryFramesInto(name string, query string, frames []*data.Frame, f *data.Frame) error {
	return errors.New("not implemented")
}

// NewInMemoryDB returns a new stub DB instance.
func NewInMemoryDB() *DB {
	return &DB{}
}

View File

@@ -8,7 +8,6 @@ import (
"github.com/grafana/grafana/pkg/infra/log"
"github.com/jeremywohl/flatten"
"github.com/scottlepp/go-duck/duck"
)
const (
@@ -21,7 +20,7 @@ var logger = log.New("sql_expr")
// TablesList returns a list of tables for the sql statement
func TablesList(rawSQL string) ([]string, error) {
duckDB := duck.NewInMemoryDB()
duckDB := NewInMemoryDB()
rawSQL = strings.Replace(rawSQL, "'", "''", -1)
cmd := fmt.Sprintf("SELECT json_serialize_sql('%s')", rawSQL)
ret, err := duckDB.RunCommands([]string{cmd})

View File

@@ -7,7 +7,6 @@ import (
"time"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/scottlepp/go-duck/duck"
"github.com/grafana/grafana/pkg/apimachinery/errutil"
"github.com/grafana/grafana/pkg/expr/mathexp"
@@ -94,11 +93,11 @@ func (gr *SQLCommand) Execute(ctx context.Context, now time.Time, vars mathexp.V
rsp := mathexp.Results{}
duckDB := duck.NewInMemoryDB()
db := sql.NewInMemoryDB()
var frame = &data.Frame{}
logger.Debug("Executing query", "query", gr.query, "frames", len(allFrames))
err := duckDB.QueryFramesInto(gr.refID, gr.query, allFrames, frame)
err := db.QueryFramesInto(gr.refID, gr.query, allFrames, frame)
if err != nil {
logger.Error("Failed to query frames", "error", err.Error())
rsp.Error = err

View File

@@ -175,7 +175,7 @@ func (cma *CloudMigrationAPI) GetSessionList(c *contextmodel.ReqContext) respons
ctx, span := cma.tracer.Start(c.Req.Context(), "MigrationAPI.GetSessionList")
defer span.End()
sl, err := cma.cloudMigrationService.GetSessionList(ctx)
sl, err := cma.cloudMigrationService.GetSessionList(ctx, c.OrgID)
if err != nil {
span.SetStatus(codes.Error, "session list error")
span.RecordError(err)
@@ -208,7 +208,7 @@ func (cma *CloudMigrationAPI) GetSession(c *contextmodel.ReqContext) response.Re
return response.Error(http.StatusBadRequest, "invalid session uid", err)
}
s, err := cma.cloudMigrationService.GetSession(ctx, uid)
s, err := cma.cloudMigrationService.GetSession(ctx, c.OrgID, uid)
if err != nil {
span.SetStatus(codes.Error, "session not found")
span.RecordError(err)
@@ -247,6 +247,7 @@ func (cma *CloudMigrationAPI) CreateSession(c *contextmodel.ReqContext) response
}
s, err := cma.cloudMigrationService.CreateSession(ctx, cloudmigration.CloudMigrationSessionRequest{
AuthToken: cmd.AuthToken,
OrgID: c.SignedInUser.OrgID,
})
if err != nil {
span.SetStatus(codes.Error, "session creation error")
@@ -285,7 +286,7 @@ func (cma *CloudMigrationAPI) DeleteSession(c *contextmodel.ReqContext) response
return response.ErrOrFallback(http.StatusBadRequest, "invalid session uid", err)
}
_, err := cma.cloudMigrationService.DeleteSession(ctx, uid)
_, err := cma.cloudMigrationService.DeleteSession(ctx, c.OrgID, uid)
if err != nil {
span.SetStatus(codes.Error, "session delete error")
span.RecordError(err)
@@ -365,6 +366,7 @@ func (cma *CloudMigrationAPI) GetSnapshot(c *contextmodel.ReqContext) response.R
SessionUID: sessUid,
ResultPage: c.QueryInt("resultPage"),
ResultLimit: c.QueryInt("resultLimit"),
OrgID: c.SignedInUser.OrgID,
}
if q.ResultLimit == 0 {
q.ResultLimit = 100
@@ -447,6 +449,7 @@ func (cma *CloudMigrationAPI) GetSnapshotList(c *contextmodel.ReqContext) respon
Limit: c.QueryInt("limit"),
Page: c.QueryInt("page"),
Sort: c.Query("sort"),
OrgID: c.SignedInUser.OrgID,
}
if q.Limit == 0 {
q.Limit = 100
@@ -507,7 +510,7 @@ func (cma *CloudMigrationAPI) UploadSnapshot(c *contextmodel.ReqContext) respons
return response.ErrOrFallback(http.StatusBadRequest, "invalid snapshot uid", err)
}
if err := cma.cloudMigrationService.UploadSnapshot(ctx, sessUid, snapshotUid); err != nil {
if err := cma.cloudMigrationService.UploadSnapshot(ctx, c.OrgID, sessUid, snapshotUid); err != nil {
span.SetStatus(codes.Error, "error uploading snapshot")
span.RecordError(err)

View File

@@ -17,13 +17,13 @@ type Service interface {
DeleteToken(ctx context.Context, uid string) error
CreateSession(ctx context.Context, req CloudMigrationSessionRequest) (*CloudMigrationSessionResponse, error)
GetSession(ctx context.Context, migUID string) (*CloudMigrationSession, error)
DeleteSession(ctx context.Context, migUID string) (*CloudMigrationSession, error)
GetSessionList(context.Context) (*CloudMigrationSessionListResponse, error)
GetSession(ctx context.Context, orgID int64, migUID string) (*CloudMigrationSession, error)
DeleteSession(ctx context.Context, orgID int64, migUID string) (*CloudMigrationSession, error)
GetSessionList(ctx context.Context, orgID int64) (*CloudMigrationSessionListResponse, error)
CreateSnapshot(ctx context.Context, signedInUser *user.SignedInUser, sessionUid string) (*CloudMigrationSnapshot, error)
GetSnapshot(ctx context.Context, query GetSnapshotsQuery) (*CloudMigrationSnapshot, error)
GetSnapshotList(ctx context.Context, query ListSnapshotsQuery) ([]CloudMigrationSnapshot, error)
UploadSnapshot(ctx context.Context, sessionUid string, snapshotUid string) error
UploadSnapshot(ctx context.Context, orgID int64, sessionUid string, snapshotUid string) error
CancelSnapshot(ctx context.Context, sessionUid string, snapshotUid string) error
}

View File

@@ -359,10 +359,10 @@ func (s *Service) DeleteToken(ctx context.Context, tokenID string) error {
return nil
}
func (s *Service) GetSession(ctx context.Context, uid string) (*cloudmigration.CloudMigrationSession, error) {
func (s *Service) GetSession(ctx context.Context, orgID int64, uid string) (*cloudmigration.CloudMigrationSession, error) {
ctx, span := s.tracer.Start(ctx, "CloudMigrationService.GetSession")
defer span.End()
migration, err := s.store.GetMigrationSessionByUID(ctx, uid)
migration, err := s.store.GetMigrationSessionByUID(ctx, orgID, uid)
if err != nil {
return nil, err
}
@@ -370,11 +370,11 @@ func (s *Service) GetSession(ctx context.Context, uid string) (*cloudmigration.C
return migration, nil
}
func (s *Service) GetSessionList(ctx context.Context) (*cloudmigration.CloudMigrationSessionListResponse, error) {
func (s *Service) GetSessionList(ctx context.Context, orgID int64) (*cloudmigration.CloudMigrationSessionListResponse, error) {
ctx, span := s.tracer.Start(ctx, "CloudMigrationService.GetSessionList")
defer span.End()
values, err := s.store.GetCloudMigrationSessionList(ctx)
values, err := s.store.GetCloudMigrationSessionList(ctx, orgID)
if err != nil {
return nil, fmt.Errorf("retrieving session list from store: %w", err)
}
@@ -405,7 +405,7 @@ func (s *Service) CreateSession(ctx context.Context, cmd cloudmigration.CloudMig
return nil, fmt.Errorf("invalid token") // don't want to leak info here
}
migration := token.ToMigration()
migration := token.ToMigration(cmd.OrgID)
// validate token against GMS before saving
if err := s.ValidateToken(ctx, migration); err != nil {
return nil, fmt.Errorf("token validation: %w", err)
@@ -426,11 +426,11 @@ func (s *Service) CreateSession(ctx context.Context, cmd cloudmigration.CloudMig
}, nil
}
func (s *Service) DeleteSession(ctx context.Context, sessionUID string) (*cloudmigration.CloudMigrationSession, error) {
func (s *Service) DeleteSession(ctx context.Context, orgID int64, sessionUID string) (*cloudmigration.CloudMigrationSession, error) {
ctx, span := s.tracer.Start(ctx, "CloudMigrationService.DeleteSession")
defer span.End()
session, snapshots, err := s.store.DeleteMigrationSessionByUID(ctx, sessionUID)
session, snapshots, err := s.store.DeleteMigrationSessionByUID(ctx, orgID, sessionUID)
if err != nil {
s.report(ctx, session, gmsclient.EventDisconnect, 0, err)
return nil, fmt.Errorf("deleting migration from db for session %v: %w", sessionUID, err)
@@ -448,7 +448,7 @@ func (s *Service) CreateSnapshot(ctx context.Context, signedInUser *user.SignedI
defer span.End()
// fetch session for the gms auth token
session, err := s.store.GetMigrationSessionByUID(ctx, sessionUid)
session, err := s.store.GetMigrationSessionByUID(ctx, signedInUser.GetOrgID(), sessionUid)
if err != nil {
return nil, fmt.Errorf("fetching migration session for uid %s: %w", sessionUid, err)
}
@@ -535,13 +535,13 @@ func (s *Service) GetSnapshot(ctx context.Context, query cloudmigration.GetSnaps
ctx, span := s.tracer.Start(ctx, "CloudMigrationService.GetSnapshot")
defer span.End()
sessionUid, snapshotUid := query.SessionUID, query.SnapshotUID
snapshot, err := s.store.GetSnapshotByUID(ctx, sessionUid, snapshotUid, query.ResultPage, query.ResultLimit)
orgID, sessionUid, snapshotUid := query.OrgID, query.SessionUID, query.SnapshotUID
snapshot, err := s.store.GetSnapshotByUID(ctx, orgID, sessionUid, snapshotUid, query.ResultPage, query.ResultLimit)
if err != nil {
return nil, fmt.Errorf("fetching snapshot for uid %s: %w", snapshotUid, err)
}
session, err := s.store.GetMigrationSessionByUID(ctx, sessionUid)
session, err := s.store.GetMigrationSessionByUID(ctx, orgID, sessionUid)
if err != nil {
return nil, fmt.Errorf("fetching session for uid %s: %w", sessionUid, err)
}
@@ -584,7 +584,7 @@ func (s *Service) GetSnapshot(ctx context.Context, query cloudmigration.GetSnaps
}
// Refresh the snapshot after the update
snapshot, err = s.store.GetSnapshotByUID(ctx, sessionUid, snapshotUid, query.ResultPage, query.ResultLimit)
snapshot, err = s.store.GetSnapshotByUID(ctx, orgID, sessionUid, snapshotUid, query.ResultPage, query.ResultLimit)
if err != nil {
return nil, fmt.Errorf("fetching snapshot for uid %s: %w", snapshotUid, err)
}
@@ -612,7 +612,7 @@ func (s *Service) GetSnapshotList(ctx context.Context, query cloudmigration.List
return snapshotList, nil
}
func (s *Service) UploadSnapshot(ctx context.Context, sessionUid string, snapshotUid string) error {
func (s *Service) UploadSnapshot(ctx context.Context, orgID int64, sessionUid string, snapshotUid string) error {
ctx, span := s.tracer.Start(ctx, "CloudMigrationService.UploadSnapshot",
trace.WithAttributes(
attribute.String("sessionUid", sessionUid),
@@ -622,7 +622,7 @@ func (s *Service) UploadSnapshot(ctx context.Context, sessionUid string, snapsho
defer span.End()
// fetch session for the gms auth token
session, err := s.store.GetMigrationSessionByUID(ctx, sessionUid)
session, err := s.store.GetMigrationSessionByUID(ctx, orgID, sessionUid)
if err != nil {
return fmt.Errorf("fetching migration session for uid %s: %w", sessionUid, err)
}
@@ -630,6 +630,7 @@ func (s *Service) UploadSnapshot(ctx context.Context, sessionUid string, snapsho
snapshot, err := s.GetSnapshot(ctx, cloudmigration.GetSnapshotsQuery{
SnapshotUID: snapshotUid,
SessionUID: sessionUid,
OrgID: orgID,
})
if err != nil {
return fmt.Errorf("fetching snapshot with uid %s: %w", snapshotUid, err)

View File

@@ -29,11 +29,11 @@ func (s *NoopServiceImpl) ValidateToken(ctx context.Context, cm cloudmigration.C
return cloudmigration.ErrFeatureDisabledError
}
func (s *NoopServiceImpl) GetSession(ctx context.Context, uid string) (*cloudmigration.CloudMigrationSession, error) {
func (s *NoopServiceImpl) GetSession(ctx context.Context, orgID int64, uid string) (*cloudmigration.CloudMigrationSession, error) {
return nil, cloudmigration.ErrFeatureDisabledError
}
func (s *NoopServiceImpl) GetSessionList(ctx context.Context) (*cloudmigration.CloudMigrationSessionListResponse, error) {
func (s *NoopServiceImpl) GetSessionList(ctx context.Context, orgID int64) (*cloudmigration.CloudMigrationSessionListResponse, error) {
return nil, cloudmigration.ErrFeatureDisabledError
}
@@ -41,7 +41,7 @@ func (s *NoopServiceImpl) CreateSession(ctx context.Context, cm cloudmigration.C
return nil, cloudmigration.ErrFeatureDisabledError
}
func (s *NoopServiceImpl) DeleteSession(ctx context.Context, uid string) (*cloudmigration.CloudMigrationSession, error) {
func (s *NoopServiceImpl) DeleteSession(ctx context.Context, orgID int64, uid string) (*cloudmigration.CloudMigrationSession, error) {
return nil, cloudmigration.ErrFeatureDisabledError
}
@@ -57,7 +57,7 @@ func (s *NoopServiceImpl) GetSnapshotList(ctx context.Context, query cloudmigrat
return nil, cloudmigration.ErrFeatureDisabledError
}
func (s *NoopServiceImpl) UploadSnapshot(ctx context.Context, sessionUid string, snapshotUid string) error {
func (s *NoopServiceImpl) UploadSnapshot(ctx context.Context, orgID int64, sessionUid string, snapshotUid string) error {
return cloudmigration.ErrFeatureDisabledError
}

View File

@@ -324,22 +324,19 @@ func Test_OnlyQueriesStatusFromGMSWhenRequired(t *testing.T) {
func Test_DeletedDashboardsNotMigrated(t *testing.T) {
s := setUpServiceTest(t, false).(*Service)
/** NOTE: this is not used at the moment since we changed the service
// modify what the mock returns for just this test case
dashMock := s.dashboardService.(*dashboards.FakeDashboardService)
dashMock.On("GetAllDashboards", mock.Anything).Return(
[]*dashboards.Dashboard{
{
UID: "1",
Data: simplejson.New(),
},
{
UID: "2",
Data: simplejson.New(),
Deleted: time.Now(),
},
{UID: "1", OrgID: 1, Data: simplejson.New()},
{UID: "2", OrgID: 1, Data: simplejson.New(), Deleted: time.Now()},
},
nil,
)
*/
data, err := s.getMigrationDataJSON(context.TODO(), &user.SignedInUser{OrgID: 1})
assert.NoError(t, err)
@@ -512,7 +509,7 @@ func TestDeleteSession(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
session, err := s.DeleteSession(ctx, "invalid-session-uid")
session, err := s.DeleteSession(ctx, 2, "invalid-session-uid")
require.Nil(t, session)
require.Error(t, err)
})
@@ -527,6 +524,7 @@ func TestDeleteSession(t *testing.T) {
cmd := cloudmigration.CloudMigrationSessionRequest{
AuthToken: createTokenResp.Token,
OrgID: 3,
}
createResp, err := s.CreateSession(ctx, cmd)
@@ -534,12 +532,12 @@ func TestDeleteSession(t *testing.T) {
require.NotEmpty(t, createResp.UID)
require.NotEmpty(t, createResp.Slug)
deletedSession, err := s.DeleteSession(ctx, createResp.UID)
deletedSession, err := s.DeleteSession(ctx, cmd.OrgID, createResp.UID)
require.NoError(t, err)
require.NotNil(t, deletedSession)
require.Equal(t, deletedSession.UID, createResp.UID)
notFoundSession, err := s.GetSession(ctx, deletedSession.UID)
notFoundSession, err := s.GetSession(ctx, cmd.OrgID, deletedSession.UID)
require.ErrorIs(t, err, cloudmigration.ErrMigrationNotFound)
require.Nil(t, notFoundSession)
})
@@ -793,6 +791,21 @@ func setUpServiceTest(t *testing.T, withDashboardMock bool) cloudmigration.Servi
)
require.NoError(t, err)
// Insert test data for dashboard test, should be removed later when we move GetAllDashboardsByOrgId() to the dashboard service
_, err = sqlStore.GetSqlxSession().Exec(context.Background(), `
INSERT INTO
dashboard (id, org_id, data, deleted, slug, title, created, version, updated )
VALUES
(1, 1, '{}', null, 'asdf', 'ghjk', '2024-03-27 15:30:43.000' , '1','2024-03-27 15:30:43.000' ),
(2, 1, '{}', '2024-03-27 15:30:43.000','qwert', 'yuio', '2024-03-27 15:30:43.000' , '2','2024-03-27 15:30:43.000'),
(3, 2, '{}', null, 'asdf', 'ghjk', '2024-03-27 15:30:43.000' , '1','2024-03-27 15:30:43.000' ),
(4, 2, '{}', '2024-03-27 15:30:43.000','qwert', 'yuio', '2024-03-27 15:30:43.000' , '2','2024-03-27 15:30:43.000');
`,
)
if err != nil {
require.NoError(t, err)
}
s, err := ProvideService(
cfg,
httpclient.NewProvider(),

View File

@@ -57,21 +57,21 @@ func (m FakeServiceImpl) CreateSession(_ context.Context, _ cloudmigration.Cloud
}, nil
}
func (m FakeServiceImpl) GetSession(_ context.Context, _ string) (*cloudmigration.CloudMigrationSession, error) {
func (m FakeServiceImpl) GetSession(_ context.Context, _ int64, _ string) (*cloudmigration.CloudMigrationSession, error) {
if m.ReturnError {
return nil, fmt.Errorf("mock error")
}
return &cloudmigration.CloudMigrationSession{UID: "fake"}, nil
}
func (m FakeServiceImpl) DeleteSession(_ context.Context, _ string) (*cloudmigration.CloudMigrationSession, error) {
func (m FakeServiceImpl) DeleteSession(_ context.Context, _ int64, _ string) (*cloudmigration.CloudMigrationSession, error) {
if m.ReturnError {
return nil, fmt.Errorf("mock error")
}
return &cloudmigration.CloudMigrationSession{UID: "fake"}, nil
}
func (m FakeServiceImpl) GetSessionList(_ context.Context) (*cloudmigration.CloudMigrationSessionListResponse, error) {
func (m FakeServiceImpl) GetSessionList(_ context.Context, _ int64) (*cloudmigration.CloudMigrationSessionListResponse, error) {
if m.ReturnError {
return nil, fmt.Errorf("mock error")
}
@@ -154,7 +154,7 @@ func (m FakeServiceImpl) GetSnapshotList(ctx context.Context, query cloudmigrati
return cloudSnapshots, nil
}
func (m FakeServiceImpl) UploadSnapshot(ctx context.Context, sessionUid string, snapshotUid string) error {
func (m FakeServiceImpl) UploadSnapshot(ctx context.Context, _ int64, sessionUid string, snapshotUid string) error {
if m.ReturnError {
return fmt.Errorf("mock error")
}

View File

@@ -39,7 +39,7 @@ func (s *Service) getMigrationDataJSON(ctx context.Context, signedInUser *user.S
defer span.End()
// Data sources
dataSources, err := s.getDataSourceCommands(ctx)
dataSources, err := s.getDataSourceCommands(ctx, signedInUser)
if err != nil {
s.log.Error("Failed to get datasources", "err", err)
return nil, err
@@ -121,17 +121,17 @@ func (s *Service) getMigrationDataJSON(ctx context.Context, signedInUser *user.S
return migrationData, nil
}
func (s *Service) getDataSourceCommands(ctx context.Context) ([]datasources.AddDataSourceCommand, error) {
func (s *Service) getDataSourceCommands(ctx context.Context, signedInUser *user.SignedInUser) ([]datasources.AddDataSourceCommand, error) {
ctx, span := s.tracer.Start(ctx, "CloudMigrationService.getDataSourceCommands")
defer span.End()
dataSources, err := s.dsService.GetAllDataSources(ctx, &datasources.GetAllDataSourcesQuery{})
dataSources, err := s.dsService.GetDataSources(ctx, &datasources.GetDataSourcesQuery{OrgID: signedInUser.GetOrgID()})
if err != nil {
s.log.Error("Failed to get all datasources", "err", err)
return nil, err
}
result := []datasources.AddDataSourceCommand{}
result := make([]datasources.AddDataSourceCommand, 0, len(dataSources))
for _, dataSource := range dataSources {
// Decrypt secure json to send raw credentials
decryptedData, err := s.secretsService.DecryptJsonData(ctx, dataSource.SecureJsonData)
@@ -166,7 +166,7 @@ func (s *Service) getDashboardAndFolderCommands(ctx context.Context, signedInUse
ctx, span := s.tracer.Start(ctx, "CloudMigrationService.getDashboardAndFolderCommands")
defer span.End()
dashs, err := s.dashboardService.GetAllDashboards(ctx)
dashs, err := s.store.GetAllDashboardsByOrgId(ctx, signedInUser.GetOrgID())
if err != nil {
return nil, nil, err
}
@@ -192,20 +192,21 @@ func (s *Service) getDashboardAndFolderCommands(ctx context.Context, signedInUse
folders, err := s.folderService.GetFolders(ctx, folder.GetFoldersQuery{
UIDs: folderUids,
SignedInUser: signedInUser,
OrgID: signedInUser.GetOrgID(),
WithFullpathUIDs: true,
})
if err != nil {
return nil, nil, err
}
folderCmds := make([]folder.CreateFolderCommand, len(folders))
for i, f := range folders {
folderCmds[i] = folder.CreateFolderCommand{
folderCmds := make([]folder.CreateFolderCommand, 0, len(folders))
for _, f := range folders {
folderCmds = append(folderCmds, folder.CreateFolderCommand{
UID: f.UID,
Title: f.Title,
Description: f.Description,
ParentUID: f.ParentUID,
}
})
}
return dashboardCmds, folderCmds, nil
@@ -554,6 +555,7 @@ func (s *Service) getFolderNamesForFolderUIDs(ctx context.Context, signedInUser
folders, err := s.folderService.GetFolders(ctx, folder.GetFoldersQuery{
UIDs: folderUIDs,
SignedInUser: signedInUser,
OrgID: signedInUser.GetOrgID(),
WithFullpathUIDs: true,
})
if err != nil {
@@ -574,15 +576,18 @@ func (s *Service) getFolderNamesForFolderUIDs(ctx context.Context, signedInUser
// getParentNames finds the parent names for resources and returns a map of data type: {data UID : parentName}
// for dashboards, folders and library elements - the parent is the parent folder
func (s *Service) getParentNames(ctx context.Context, signedInUser *user.SignedInUser, dashboards []dashboards.Dashboard, folders []folder.CreateFolderCommand, libraryElements []libraryElement) (map[cloudmigration.MigrateDataType]map[string](string), error) {
parentNamesByType := make(map[cloudmigration.MigrateDataType]map[string](string))
parentNamesByType := make(map[cloudmigration.MigrateDataType]map[string]string)
for _, dataType := range currentMigrationTypes {
parentNamesByType[dataType] = make(map[string]string)
}
// Obtain list of unique folderUIDs
parentFolderUIDsSet := make(map[string]struct{}, len(dashboards)+len(folders)+len(libraryElements))
parentFolderUIDsSet := make(map[string]struct{})
for _, dashboard := range dashboards {
parentFolderUIDsSet[dashboard.FolderUID] = struct{}{}
// we dont need the root folder
if dashboard.FolderUID != "" {
parentFolderUIDsSet[dashboard.FolderUID] = struct{}{}
}
}
for _, f := range folders {
parentFolderUIDsSet[f.ParentUID] = struct{}{}

View File

@@ -4,24 +4,29 @@ import (
"context"
"github.com/grafana/grafana/pkg/services/cloudmigration"
"github.com/grafana/grafana/pkg/services/dashboards"
)
type store interface {
CreateMigrationSession(ctx context.Context, session cloudmigration.CloudMigrationSession) (*cloudmigration.CloudMigrationSession, error)
GetMigrationSessionByUID(ctx context.Context, uid string) (*cloudmigration.CloudMigrationSession, error)
GetCloudMigrationSessionList(ctx context.Context) ([]*cloudmigration.CloudMigrationSession, error)
GetMigrationSessionByUID(ctx context.Context, orgID int64, uid string) (*cloudmigration.CloudMigrationSession, error)
GetCloudMigrationSessionList(ctx context.Context, orgID int64) ([]*cloudmigration.CloudMigrationSession, error)
// DeleteMigrationSessionByUID deletes the migration session, and all the related snapshot and resources.
// the work is done in a transaction.
DeleteMigrationSessionByUID(ctx context.Context, uid string) (*cloudmigration.CloudMigrationSession, []cloudmigration.CloudMigrationSnapshot, error)
DeleteMigrationSessionByUID(ctx context.Context, orgID int64, uid string) (*cloudmigration.CloudMigrationSession, []cloudmigration.CloudMigrationSnapshot, error)
CreateSnapshot(ctx context.Context, snapshot cloudmigration.CloudMigrationSnapshot) (string, error)
UpdateSnapshot(ctx context.Context, snapshot cloudmigration.UpdateSnapshotCmd) error
GetSnapshotByUID(ctx context.Context, sessUid, id string, resultPage int, resultLimit int) (*cloudmigration.CloudMigrationSnapshot, error)
GetSnapshotByUID(ctx context.Context, orgID int64, sessUid, id string, resultPage int, resultLimit int) (*cloudmigration.CloudMigrationSnapshot, error)
GetSnapshotList(ctx context.Context, query cloudmigration.ListSnapshotsQuery) ([]cloudmigration.CloudMigrationSnapshot, error)
DeleteSnapshot(ctx context.Context, snapshotUid string) error
CreateUpdateSnapshotResources(ctx context.Context, snapshotUid string, resources []cloudmigration.CloudMigrationResource) error
GetSnapshotResources(ctx context.Context, snapshotUid string, page int, limit int) ([]cloudmigration.CloudMigrationResource, error)
GetSnapshotResourceStats(ctx context.Context, snapshotUid string) (*cloudmigration.SnapshotResourceStats, error)
DeleteSnapshotResources(ctx context.Context, snapshotUid string) error
// Deleted because were not used externally
// - DeleteSnapshot(ctx context.Context, snapshotUid string) error
// - CreateUpdateSnapshotResources(ctx context.Context, snapshotUid string, resources []cloudmigration.CloudMigrationResource) error
// - GetSnapshotResources(ctx context.Context, snapshotUid string, page int, limit int) ([]cloudmigration.CloudMigrationResource, error)
// - GetSnapshotResourceStats(ctx context.Context, snapshotUid string) (*cloudmigration.SnapshotResourceStats, error)
// - DeleteSnapshotResources(ctx context.Context, snapshotUid string) error
// TODO move this function dashboards/databases/databases.go
GetAllDashboardsByOrgId(ctx context.Context, orgID int64) ([]*dashboards.Dashboard, error)
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/services/cloudmigration"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/secrets"
secretskv "github.com/grafana/grafana/pkg/services/secrets/kvstore"
"github.com/grafana/grafana/pkg/services/sqlstore"
@@ -29,10 +30,10 @@ const (
GetSnapshotListSortingLatest = "latest"
)
func (ss *sqlStore) GetMigrationSessionByUID(ctx context.Context, uid string) (*cloudmigration.CloudMigrationSession, error) {
func (ss *sqlStore) GetMigrationSessionByUID(ctx context.Context, orgID int64, uid string) (*cloudmigration.CloudMigrationSession, error) {
var cm cloudmigration.CloudMigrationSession
err := ss.db.WithDbSession(ctx, func(sess *db.Session) error {
exist, err := sess.Where("uid=?", uid).Get(&cm)
exist, err := sess.Where("org_id=? AND uid=?", orgID, uid).Get(&cm)
if err != nil {
return err
}
@@ -74,11 +75,10 @@ func (ss *sqlStore) CreateMigrationSession(ctx context.Context, migration cloudm
return &migration, nil
}
func (ss *sqlStore) GetCloudMigrationSessionList(ctx context.Context) ([]*cloudmigration.CloudMigrationSession, error) {
func (ss *sqlStore) GetCloudMigrationSessionList(ctx context.Context, orgID int64) ([]*cloudmigration.CloudMigrationSession, error) {
var migrations = make([]*cloudmigration.CloudMigrationSession, 0)
err := ss.db.WithDbSession(ctx, func(sess *db.Session) error {
sess.OrderBy("created DESC")
return sess.Find(&migrations)
return sess.Where("org_id=?", orgID).OrderBy("created DESC").Find(&migrations)
})
if err != nil {
return nil, err
@@ -95,10 +95,10 @@ func (ss *sqlStore) GetCloudMigrationSessionList(ctx context.Context) ([]*cloudm
return migrations, nil
}
func (ss *sqlStore) DeleteMigrationSessionByUID(ctx context.Context, uid string) (*cloudmigration.CloudMigrationSession, []cloudmigration.CloudMigrationSnapshot, error) {
func (ss *sqlStore) DeleteMigrationSessionByUID(ctx context.Context, orgID int64, uid string) (*cloudmigration.CloudMigrationSession, []cloudmigration.CloudMigrationSnapshot, error) {
var c cloudmigration.CloudMigrationSession
err := ss.db.WithDbSession(ctx, func(sess *db.Session) error {
exist, err := sess.Where("uid=?", uid).Get(&c)
exist, err := sess.Where("org_id=? AND uid=?", orgID, uid).Get(&c)
if err != nil {
return err
}
@@ -124,11 +124,11 @@ func (ss *sqlStore) DeleteMigrationSessionByUID(ctx context.Context, uid string)
err = ss.db.InTransaction(ctx, func(ctx context.Context) error {
for _, snapshot := range snapshots {
err := ss.DeleteSnapshotResources(ctx, snapshot.UID)
err := ss.deleteSnapshotResources(ctx, snapshot.UID)
if err != nil {
return fmt.Errorf("deleting snapshot resource from db: %w", err)
}
err = ss.DeleteSnapshot(ctx, snapshot.UID)
err = ss.deleteSnapshot(ctx, orgID, snapshot.UID)
if err != nil {
return fmt.Errorf("deleting snapshot from db: %w", err)
}
@@ -214,7 +214,7 @@ func (ss *sqlStore) UpdateSnapshot(ctx context.Context, update cloudmigration.Up
// Update resources if set
if len(update.Resources) > 0 {
if err := ss.CreateUpdateSnapshotResources(ctx, update.UID, update.Resources); err != nil {
if err := ss.createUpdateSnapshotResources(ctx, update.UID, update.Resources); err != nil {
return err
}
}
@@ -224,7 +224,7 @@ func (ss *sqlStore) UpdateSnapshot(ctx context.Context, update cloudmigration.Up
return err
}
func (ss *sqlStore) DeleteSnapshot(ctx context.Context, snapshotUid string) error {
func (ss *sqlStore) deleteSnapshot(ctx context.Context, orgID int64, snapshotUid string) error {
return ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
_, err := sess.Delete(cloudmigration.CloudMigrationSnapshot{
UID: snapshotUid,
@@ -233,9 +233,16 @@ func (ss *sqlStore) DeleteSnapshot(ctx context.Context, snapshotUid string) erro
})
}
func (ss *sqlStore) GetSnapshotByUID(ctx context.Context, sessionUid, uid string, resultPage int, resultLimit int) (*cloudmigration.CloudMigrationSnapshot, error) {
func (ss *sqlStore) GetSnapshotByUID(ctx context.Context, orgID int64, sessionUid, uid string, resultPage int, resultLimit int) (*cloudmigration.CloudMigrationSnapshot, error) {
// first we check if the session exists, using orgId and sessionUid
session, err := ss.GetMigrationSessionByUID(ctx, orgID, sessionUid)
if err != nil || session == nil {
return nil, err
}
// now we get the snapshot
var snapshot cloudmigration.CloudMigrationSnapshot
err := ss.db.WithDbSession(ctx, func(sess *db.Session) error {
err = ss.db.WithDbSession(ctx, func(sess *db.Session) error {
exist, err := sess.Where("session_uid=? AND uid=?", sessionUid, uid).Get(&snapshot)
if err != nil {
return err
@@ -257,11 +264,11 @@ func (ss *sqlStore) GetSnapshotByUID(ctx context.Context, sessionUid, uid string
snapshot.EncryptionKey = []byte(secret)
}
resources, err := ss.GetSnapshotResources(ctx, uid, resultPage, resultLimit)
resources, err := ss.getSnapshotResources(ctx, uid, resultPage, resultLimit)
if err == nil {
snapshot.Resources = resources
}
stats, err := ss.GetSnapshotResourceStats(ctx, uid)
stats, err := ss.getSnapshotResourceStats(ctx, uid)
if err == nil {
snapshot.StatsRollup = *stats
}
@@ -274,7 +281,9 @@ func (ss *sqlStore) GetSnapshotByUID(ctx context.Context, sessionUid, uid string
func (ss *sqlStore) GetSnapshotList(ctx context.Context, query cloudmigration.ListSnapshotsQuery) ([]cloudmigration.CloudMigrationSnapshot, error) {
var snapshots = make([]cloudmigration.CloudMigrationSnapshot, 0)
err := ss.db.WithDbSession(ctx, func(sess *db.Session) error {
sess.Join("INNER", "cloud_migration_session", "cloud_migration_session.uid = cloud_migration_snapshot.session_uid")
sess.Join("INNER", "cloud_migration_session",
"cloud_migration_session.uid = cloud_migration_snapshot.session_uid AND cloud_migration_session.org_id = ?", query.OrgID,
)
if query.Limit != GetAllSnapshots {
offset := (query.Page - 1) * query.Limit
sess.Limit(query.Limit, offset)
@@ -298,7 +307,7 @@ func (ss *sqlStore) GetSnapshotList(ctx context.Context, query cloudmigration.Li
snapshot.EncryptionKey = []byte(secret)
}
if stats, err := ss.GetSnapshotResourceStats(ctx, snapshot.UID); err != nil {
if stats, err := ss.getSnapshotResourceStats(ctx, snapshot.UID); err != nil {
return nil, err
} else {
snapshot.StatsRollup = *stats
@@ -310,7 +319,7 @@ func (ss *sqlStore) GetSnapshotList(ctx context.Context, query cloudmigration.Li
// CreateUpdateSnapshotResources either updates a migration resource for a snapshot, or creates it if it does not exist
// If the uid is not known, it uses snapshot_uid + resource_uid as a lookup
func (ss *sqlStore) CreateUpdateSnapshotResources(ctx context.Context, snapshotUid string, resources []cloudmigration.CloudMigrationResource) error {
func (ss *sqlStore) createUpdateSnapshotResources(ctx context.Context, snapshotUid string, resources []cloudmigration.CloudMigrationResource) error {
return ss.db.InTransaction(ctx, func(ctx context.Context) error {
sql := "UPDATE cloud_migration_resource SET status=?, error_string=? WHERE uid=? OR (snapshot_uid=? AND resource_uid=?)"
err := ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
@@ -344,7 +353,7 @@ func (ss *sqlStore) CreateUpdateSnapshotResources(ctx context.Context, snapshotU
})
}
func (ss *sqlStore) GetSnapshotResources(ctx context.Context, snapshotUid string, page int, limit int) ([]cloudmigration.CloudMigrationResource, error) {
func (ss *sqlStore) getSnapshotResources(ctx context.Context, snapshotUid string, page int, limit int) ([]cloudmigration.CloudMigrationResource, error) {
if page < 1 {
page = 1
}
@@ -366,7 +375,7 @@ func (ss *sqlStore) GetSnapshotResources(ctx context.Context, snapshotUid string
return resources, nil
}
func (ss *sqlStore) GetSnapshotResourceStats(ctx context.Context, snapshotUid string) (*cloudmigration.SnapshotResourceStats, error) {
func (ss *sqlStore) getSnapshotResourceStats(ctx context.Context, snapshotUid string) (*cloudmigration.SnapshotResourceStats, error) {
typeCounts := make([]struct {
Count int `json:"count"`
Type string `json:"type"`
@@ -413,7 +422,7 @@ func (ss *sqlStore) GetSnapshotResourceStats(ctx context.Context, snapshotUid st
return stats, nil
}
func (ss *sqlStore) DeleteSnapshotResources(ctx context.Context, snapshotUid string) error {
func (ss *sqlStore) deleteSnapshotResources(ctx context.Context, snapshotUid string) error {
return ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
_, err := sess.Delete(cloudmigration.CloudMigrationResource{
SnapshotUID: snapshotUid,
@@ -456,3 +465,19 @@ func (ss *sqlStore) decryptToken(ctx context.Context, cm *cloudmigration.CloudMi
return nil
}
// TODO move this function dashboards/databases/databases.go
func (ss *sqlStore) GetAllDashboardsByOrgId(ctx context.Context, orgID int64) ([]*dashboards.Dashboard, error) {
//ctx, span := tracer.Start(ctx, "dashboards.database.GetAllDashboardsByOrgId")
//defer span.End()
var dashs = make([]*dashboards.Dashboard, 0)
err := ss.db.WithDbSession(ctx, func(session *db.Session) error {
// "deleted IS NULL" is to avoid deleted dashboards
return session.Where("org_id = ? AND deleted IS NULL", orgID).Find(&dashs)
})
if err != nil {
return nil, err
}
return dashs, nil
}

View File

@@ -25,7 +25,7 @@ func Test_GetAllCloudMigrationSessions(t *testing.T) {
ctx := context.Background()
t.Run("get all cloud_migration_session entries", func(t *testing.T) {
value, err := s.GetCloudMigrationSessionList(ctx)
value, err := s.GetCloudMigrationSessionList(ctx, 1)
require.NoError(t, err)
require.Equal(t, 3, len(value))
for _, m := range value {
@@ -54,6 +54,7 @@ func Test_CreateMigrationSession(t *testing.T) {
cm := cloudmigration.CloudMigrationSession{
AuthToken: encodeToken("token"),
Slug: "fake_stack",
OrgID: 3,
StackID: 1234,
RegionSlug: "fake_slug",
ClusterSlug: "fake_cluster_slug",
@@ -63,7 +64,7 @@ func Test_CreateMigrationSession(t *testing.T) {
require.NotEmpty(t, sess.ID)
require.NotEmpty(t, sess.UID)
getRes, err := s.GetMigrationSessionByUID(ctx, sess.UID)
getRes, err := s.GetMigrationSessionByUID(ctx, 3, sess.UID)
require.NoError(t, err)
require.Equal(t, sess.ID, getRes.ID)
require.Equal(t, sess.UID, getRes.UID)
@@ -80,13 +81,15 @@ func Test_GetMigrationSessionByUID(t *testing.T) {
ctx := context.Background()
t.Run("find session by uid", func(t *testing.T) {
uid := "qwerty"
mig, err := s.GetMigrationSessionByUID(ctx, uid)
orgId := int64(1)
mig, err := s.GetMigrationSessionByUID(ctx, orgId, uid)
require.NoError(t, err)
require.Equal(t, uid, mig.UID)
require.Equal(t, orgId, mig.OrgID)
})
t.Run("returns error if session is not found by uid", func(t *testing.T) {
_, err := s.GetMigrationSessionByUID(ctx, "fake_uid_1234")
_, err := s.GetMigrationSessionByUID(ctx, 1, "fake_uid_1234")
require.ErrorIs(t, cloudmigration.ErrMigrationNotFound, err)
})
}
@@ -115,7 +118,10 @@ func Test_SnapshotManagement(t *testing.T) {
ctx := context.Background()
t.Run("tests the snapshot lifecycle", func(t *testing.T) {
session, err := s.CreateMigrationSession(ctx, cloudmigration.CloudMigrationSession{})
session, err := s.CreateMigrationSession(ctx, cloudmigration.CloudMigrationSession{
OrgID: 1,
AuthToken: encodeToken("token"),
})
require.NoError(t, err)
// create a snapshot
@@ -129,7 +135,7 @@ func Test_SnapshotManagement(t *testing.T) {
require.NotEmpty(t, snapshotUid)
//retrieve it from the db
snapshot, err := s.GetSnapshotByUID(ctx, session.UID, snapshotUid, 0, 0)
snapshot, err := s.GetSnapshotByUID(ctx, 1, session.UID, snapshotUid, 0, 0)
require.NoError(t, err)
require.Equal(t, cloudmigration.SnapshotStatusCreating, snapshot.Status)
@@ -138,22 +144,22 @@ func Test_SnapshotManagement(t *testing.T) {
require.NoError(t, err)
//retrieve it again
snapshot, err = s.GetSnapshotByUID(ctx, session.UID, snapshotUid, 0, 0)
snapshot, err = s.GetSnapshotByUID(ctx, 1, session.UID, snapshotUid, 0, 0)
require.NoError(t, err)
require.Equal(t, cloudmigration.SnapshotStatusCreating, snapshot.Status)
// lists snapshots and ensures it's in there
snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: session.UID, Page: 1, Limit: 100})
snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: session.UID, OrgID: 1, Page: 1, Limit: 100})
require.NoError(t, err)
require.Len(t, snapshots, 1)
require.Equal(t, *snapshot, snapshots[0])
// delete snapshot
err = s.DeleteSnapshot(ctx, snapshotUid)
err = s.deleteSnapshot(ctx, 1, snapshotUid)
require.NoError(t, err)
// now we expect not to find the snapshot
snapshot, err = s.GetSnapshotByUID(ctx, session.UID, snapshotUid, 0, 0)
snapshot, err = s.GetSnapshotByUID(ctx, 1, session.UID, snapshotUid, 0, 0)
require.ErrorIs(t, err, cloudmigration.ErrSnapshotNotFound)
require.Nil(t, snapshot)
})
@@ -165,12 +171,12 @@ func Test_SnapshotResources(t *testing.T) {
t.Run("tests CRUD of snapshot resources", func(t *testing.T) {
// Get the default rows from the test
resources, err := s.GetSnapshotResources(ctx, "poiuy", 0, 100)
resources, err := s.getSnapshotResources(ctx, "poiuy", 0, 100)
assert.NoError(t, err)
assert.Len(t, resources, 3)
// create a new resource and update an existing resource
err = s.CreateUpdateSnapshotResources(ctx, "poiuy", []cloudmigration.CloudMigrationResource{
err = s.createUpdateSnapshotResources(ctx, "poiuy", []cloudmigration.CloudMigrationResource{
{
Type: cloudmigration.DatasourceDataType,
RefID: "mi39fj",
@@ -184,7 +190,7 @@ func Test_SnapshotResources(t *testing.T) {
assert.NoError(t, err)
// Get resources again
resources, err = s.GetSnapshotResources(ctx, "poiuy", 0, 100)
resources, err = s.getSnapshotResources(ctx, "poiuy", 0, 100)
assert.NoError(t, err)
assert.Len(t, resources, 4)
// ensure existing resource was updated
@@ -203,7 +209,7 @@ func Test_SnapshotResources(t *testing.T) {
}
// check stats
stats, err := s.GetSnapshotResourceStats(ctx, "poiuy")
stats, err := s.getSnapshotResourceStats(ctx, "poiuy")
assert.NoError(t, err)
assert.Equal(t, map[cloudmigration.MigrateDataType]int{
cloudmigration.DatasourceDataType: 2,
@@ -217,10 +223,10 @@ func Test_SnapshotResources(t *testing.T) {
assert.Equal(t, 4, stats.Total)
// delete snapshot resources
err = s.DeleteSnapshotResources(ctx, "poiuy")
err = s.deleteSnapshotResources(ctx, "poiuy")
assert.NoError(t, err)
// make sure they're gone
resources, err = s.GetSnapshotResources(ctx, "poiuy", 0, 100)
resources, err = s.getSnapshotResources(ctx, "poiuy", 0, 100)
assert.NoError(t, err)
assert.Len(t, resources, 0)
})
@@ -233,7 +239,7 @@ func TestGetSnapshotList(t *testing.T) {
ctx := context.Background()
t.Run("returns list of snapshots that belong to a session", func(t *testing.T) {
snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: sessionUID, Page: 1, Limit: 100})
snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: sessionUID, OrgID: 1, Page: 1, Limit: 100})
require.NoError(t, err)
ids := make([]string, 0)
@@ -246,7 +252,7 @@ func TestGetSnapshotList(t *testing.T) {
})
t.Run("returns only one snapshot that belongs to a session", func(t *testing.T) {
snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: sessionUID, Page: 1, Limit: 1})
snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: sessionUID, OrgID: 1, Page: 1, Limit: 1})
require.NoError(t, err)
assert.Len(t, snapshots, 1)
})
@@ -258,7 +264,7 @@ func TestGetSnapshotList(t *testing.T) {
})
t.Run("returns paginated snapshot that belongs to a session", func(t *testing.T) {
snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: sessionUID, Page: 2, Limit: 1})
snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: sessionUID, OrgID: 1, Page: 2, Limit: 1})
require.NoError(t, err)
ids := make([]string, 0)
@@ -271,7 +277,7 @@ func TestGetSnapshotList(t *testing.T) {
})
t.Run("returns desc sorted list of snapshots that belong to a session", func(t *testing.T) {
snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: sessionUID, Page: 1, Limit: 100, Sort: "latest"})
snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: sessionUID, OrgID: 1, Page: 1, Limit: 100, Sort: "latest"})
require.NoError(t, err)
ids := make([]string, 0)
@@ -291,7 +297,7 @@ func TestGetSnapshotList(t *testing.T) {
t.Run("if the session is deleted, snapshots can't be retrieved anymore", func(t *testing.T) {
// Delete the session.
_, _, err := s.DeleteMigrationSessionByUID(ctx, sessionUID)
_, _, err := s.DeleteMigrationSessionByUID(ctx, 1, sessionUID)
require.NoError(t, err)
// Fetch the snapshots that belong to the deleted session.
@@ -363,15 +369,17 @@ func setUpTest(t *testing.T) (*sqlstore.SQLStore, *sqlStore) {
// insert cloud migration test data
_, err := testDB.GetSqlxSession().Exec(ctx, `
INSERT INTO
cloud_migration_session (id, uid, auth_token, slug, stack_id, region_slug, cluster_slug, created, updated)
cloud_migration_session (id, uid, org_id, auth_token, slug, stack_id, region_slug, cluster_slug, created, updated)
VALUES
(1,'qwerty', ?, '11111', 11111, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000'),
(2,'asdfgh', ?, '22222', 22222, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000'),
(3,'zxcvbn', ?, '33333', 33333, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000');
(1,'qwerty', 1, ?, '11111', 11111, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000'),
(2,'asdfgh', 1, ?, '22222', 22222, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000'),
(3,'zxcvbn', 1, ?, '33333', 33333, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000'),
(4,'zxcvbn_org2', 2, ?, '33333', 33333, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000');
`,
encodeToken("12345"),
encodeToken("6789"),
encodeToken("777"),
encodeToken("0987"),
)
require.NoError(t, err)
@@ -380,9 +388,10 @@ func setUpTest(t *testing.T) (*sqlstore.SQLStore, *sqlStore) {
INSERT INTO
cloud_migration_snapshot (session_uid, uid, created, updated, finished, status)
VALUES
('qwerty', 'poiuy', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished"),
('qwerty', 'poiuy', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished"),
('qwerty', 'lkjhg', '2024-03-26 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished"),
('zxcvbn', 'mnbvvc', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished");
('zxcvbn', 'mnbvvc', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished"),
('zxcvbn_org2', 'mnbvvc_org2', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished");
`,
)
require.NoError(t, err)
@@ -400,7 +409,8 @@ func setUpTest(t *testing.T) (*sqlstore.SQLStore, *sqlStore) {
('mnbvde', 'poiuy', 'DATASOURCE', 'jf38gh', 'OK', ''),
('qwerty', 'poiuy', 'DASHBOARD', 'ejcx4d', 'ERROR', 'fake error'),
('zxcvbn', 'poiuy', 'FOLDER', 'fi39fj', 'PENDING', ''),
('4fi9sd', '39fi39', 'FOLDER', 'fi39fj', 'OK', '');
('4fi9sd', '39fi39', 'FOLDER', 'fi39fj', 'OK', ''),
('4fi9ee', 'mnbvvc_org2', 'DATASOURCE', 'fi39asd', 'OK', '');
`,
)
require.NoError(t, err)

View File

@@ -21,6 +21,7 @@ var (
// CloudMigrationSession represents a configured migration token
type CloudMigrationSession struct {
ID int64 `xorm:"pk autoincr 'id'"`
OrgID int64 `xorm:"org_id"`
UID string `xorm:"uid"`
AuthToken string
Slug string
@@ -126,6 +127,8 @@ type CloudMigrationRunList struct {
type CloudMigrationSessionRequest struct {
AuthToken string
// OrgId in the on prem instance
OrgID int64
}
type CloudMigrationSessionResponse struct {
@@ -141,6 +144,7 @@ type CloudMigrationSessionListResponse struct {
type GetSnapshotsQuery struct {
SnapshotUID string
OrgID int64
SessionUID string
ResultPage int
ResultLimit int
@@ -148,6 +152,7 @@ type GetSnapshotsQuery struct {
type ListSnapshotsQuery struct {
SessionUID string
OrgID int64
Page int
Limit int
Sort string
@@ -171,13 +176,14 @@ type Base64EncodedTokenPayload struct {
Instance Base64HGInstance
}
func (p Base64EncodedTokenPayload) ToMigration() CloudMigrationSession {
func (p Base64EncodedTokenPayload) ToMigration(orgID int64) CloudMigrationSession {
return CloudMigrationSession{
AuthToken: p.Token,
Slug: p.Instance.Slug,
StackID: p.Instance.StackID,
RegionSlug: p.Instance.RegionSlug,
ClusterSlug: p.Instance.ClusterSlug,
OrgID: orgID,
}
}

View File

@@ -66,7 +66,7 @@ func addCloudMigrationsMigrations(mg *Migrator) {
}))
// --- v2 - asynchronous workflow refactor
sessionTable := Table{
migrationSessionTable := Table{
Name: "cloud_migration_session",
Columns: []*Column{
{Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true},
@@ -99,7 +99,7 @@ func addCloudMigrationsMigrations(mg *Migrator) {
},
}
addTableReplaceMigrations(mg, migrationTable, sessionTable, 2, map[string]string{
addTableReplaceMigrations(mg, migrationTable, migrationSessionTable, 2, map[string]string{
"id": "id",
"uid": "uid",
"auth_token": "auth_token",
@@ -170,4 +170,9 @@ func addCloudMigrationsMigrations(mg *Migrator) {
Type: DB_Text,
Nullable: true,
}))
// -- Adds org_id column for for all elements - defaults to 1 (default org)
mg.AddMigration("add cloud_migration_session.org_id column", NewAddColumnMigration(migrationSessionTable, &Column{
Name: "org_id", Type: DB_BigInt, Nullable: false, Default: "1",
}))
}

View File

@@ -1,8 +1,11 @@
import { DataSourceApi, dateTime, ExploreUrlState, LogsSortOrder } from '@grafana/data';
import { DataSourceApi, dateTime, ExploreUrlState, GrafanaConfig, locationUtil, LogsSortOrder } from '@grafana/data';
import { serializeStateToUrlParam } from '@grafana/data/src/utils/url';
import { config } from '@grafana/runtime';
import { DataQuery } from '@grafana/schema';
import { RefreshPicker } from '@grafana/ui';
import { getTimeSrv } from 'app/features/dashboard/services/TimeSrv';
import { DEFAULT_RANGE } from 'app/features/explore/state/utils';
import { getVariablesUrlParams } from 'app/features/variables/getAllVariableValuesForUrl';
import { DatasourceSrvMock, MockDataSourceApi } from '../../../test/mocks/datasource_srv';
@@ -152,6 +155,27 @@ describe('getExploreUrl', () => {
expect(interpolateMockLoki).toBeCalled();
expect(interpolateMockProm).toBeCalled();
});
describe('subpath', () => {
beforeAll(() => {
locationUtil.initialize({
config: { appSubUrl: '/subpath' } as GrafanaConfig,
getVariablesUrlParams: jest.fn(),
getTimeRangeForUrl: jest.fn(),
});
});
afterAll(() => {
// Reset locationUtil
locationUtil.initialize({
config,
getTimeRangeForUrl: getTimeSrv().timeRangeForUrl,
getVariablesUrlParams: getVariablesUrlParams,
});
});
it('should work with sub path', async () => {
expect(await getExploreUrl(args)).toMatch(/subpath\/explore/g);
});
});
});
describe('hasNonEmptyQuery', () => {

View File

@@ -12,6 +12,7 @@ import {
DefaultTimeZone,
getNextRefId,
IntervalValues,
locationUtil,
LogsDedupStrategy,
LogsSortOrder,
rangeUtil,
@@ -94,7 +95,7 @@ export async function getExploreUrl(args: GetExploreUrlArguments): Promise<strin
const exploreState = JSON.stringify({
[generateExploreId()]: { range: toURLRange(timeRange.raw), queries: interpolatedQueries, datasource: dsRef?.uid },
});
return urlUtil.renderUrl('/explore', { panes: exploreState, schemaVersion: 1 });
return locationUtil.assureBaseUrl(urlUtil.renderUrl('/explore', { panes: exploreState, schemaVersion: 1 }));
}
export function requestIdGenerator(exploreId: string) {

View File

@@ -112,22 +112,7 @@ def publish_artifacts_step():
"PRERELEASE_BUCKET": from_secret("prerelease_bucket"),
},
"commands": [
"./bin/build artifacts packages --tag $${DRONE_TAG} --src-bucket $${PRERELEASE_BUCKET}",
],
"depends_on": ["compile-build-cmd"],
}
def publish_static_assets_step():
return {
"name": "publish-static-assets",
"image": images["publish"],
"environment": {
"GCP_KEY": from_secret(gcp_grafanauploads_base64),
"PRERELEASE_BUCKET": from_secret("prerelease_bucket"),
"STATIC_ASSET_EDITIONS": from_secret("static_asset_editions"),
},
"commands": [
"./bin/build artifacts static-assets --tag ${DRONE_TAG} --static-asset-editions=grafana-oss",
"./bin/build artifacts packages --artifacts-editions=oss --tag $${DRONE_TAG} --src-bucket $${PRERELEASE_BUCKET}",
],
"depends_on": ["compile-build-cmd"],
}
@@ -163,9 +148,8 @@ def publish_artifacts_pipelines(mode):
steps = [
compile_build_cmd(),
publish_artifacts_step(),
publish_static_assets_step(),
publish_storybook_step(),
release_pr_step(depends_on = ["publish-artifacts", "publish-static-assets"]),
release_pr_step(depends_on = ["publish-artifacts"]),
]
return [

View File

@@ -31,43 +31,44 @@ def publish_image_public_step():
"""
command = """
bash -c '
IMAGE_TAG=$(echo "$${TAG}" | sed -e "s/+/-/g")
debug=
if [[ -n $${DRY_RUN} ]]; then debug=echo; fi
docker login -u $${DOCKER_USER} -p $${DOCKER_PASSWORD}
# Push the grafana-image-tags images
$$debug docker push grafana/grafana-image-tags:$${TAG}-amd64
$$debug docker push grafana/grafana-image-tags:$${TAG}-arm64
$$debug docker push grafana/grafana-image-tags:$${TAG}-armv7
$$debug docker push grafana/grafana-image-tags:$${TAG}-ubuntu-amd64
$$debug docker push grafana/grafana-image-tags:$${TAG}-ubuntu-arm64
$$debug docker push grafana/grafana-image-tags:$${TAG}-ubuntu-armv7
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-amd64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-arm64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-armv7
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-amd64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-arm64
$$debug docker push grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-armv7
# Create the grafana manifests
$$debug docker manifest create grafana/grafana:${TAG} \
grafana/grafana-image-tags:$${TAG}-amd64 \
grafana/grafana-image-tags:$${TAG}-arm64 \
grafana/grafana-image-tags:$${TAG}-armv7
$$debug docker manifest create grafana/grafana:$${IMAGE_TAG} \
grafana/grafana-image-tags:$${IMAGE_TAG}-amd64 \
grafana/grafana-image-tags:$${IMAGE_TAG}-arm64 \
grafana/grafana-image-tags:$${IMAGE_TAG}-armv7
$$debug docker manifest create grafana/grafana:${TAG}-ubuntu \
grafana/grafana-image-tags:$${TAG}-ubuntu-amd64 \
grafana/grafana-image-tags:$${TAG}-ubuntu-arm64 \
grafana/grafana-image-tags:$${TAG}-ubuntu-armv7
$$debug docker manifest create grafana/grafana:$${IMAGE_TAG}-ubuntu \
grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-amd64 \
grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-arm64 \
grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-armv7
# Push the grafana manifests
$$debug docker manifest push grafana/grafana:$${TAG}
$$debug docker manifest push grafana/grafana:$${TAG}-ubuntu
$$debug docker manifest push grafana/grafana:$${IMAGE_TAG}
$$debug docker manifest push grafana/grafana:$${IMAGE_TAG}-ubuntu
# if LATEST is set, then also create & push latest
if [[ -n $${LATEST} ]]; then
$$debug docker manifest create grafana/grafana:latest \
grafana/grafana-image-tags:$${TAG}-amd64 \
grafana/grafana-image-tags:$${TAG}-arm64 \
grafana/grafana-image-tags:$${TAG}-armv7
grafana/grafana-image-tags:$${IMAGE_TAG}-amd64 \
grafana/grafana-image-tags:$${IMAGE_TAG}-arm64 \
grafana/grafana-image-tags:$${IMAGE_TAG}-armv7
$$debug docker manifest create grafana/grafana:latest-ubuntu \
grafana/grafana-image-tags:$${TAG}-ubuntu-amd64 \
grafana/grafana-image-tags:$${TAG}-ubuntu-arm64 \
grafana/grafana-image-tags:$${TAG}-ubuntu-armv7
grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-amd64 \
grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-arm64 \
grafana/grafana-image-tags:$${IMAGE_TAG}-ubuntu-armv7
$$debug docker manifest push grafana/grafana:latest
$$debug docker manifest push grafana/grafana:latest-ubuntu

View File

@@ -2,7 +2,6 @@
This module returns a Drone step and pipeline for linting with shellcheck.
"""
load("scripts/drone/steps/lib.star", "compile_build_cmd")
load(
"scripts/drone/utils/images.star",
"images",
@@ -39,7 +38,6 @@ def shellcheck_step():
def shellcheck_pipeline():
environment = {"EDITION": "oss"}
steps = [
compile_build_cmd(),
shellcheck_step(),
]
return pipeline(

View File

@@ -4,7 +4,6 @@ This module returns a Drone pipeline that verifies all Starlark files are linted
load(
"scripts/drone/steps/lib.star",
"compile_build_cmd",
"identify_runner_step",
"lint_starlark_step",
)
@@ -17,7 +16,6 @@ def verify_starlark(trigger, ver_mode):
environment = {"EDITION": "oss"}
steps = [
identify_runner_step(),
compile_build_cmd(),
lint_starlark_step(),
]
return pipeline(

View File

@@ -156,9 +156,7 @@ def lint_starlark_step():
"go install github.com/bazelbuild/buildtools/buildifier@latest",
"buildifier --lint=warn -mode=check -r .",
],
"depends_on": [
"compile-build-cmd",
],
"depends_on": [],
}
def enterprise_downstream_step(ver_mode):
@@ -1285,13 +1283,14 @@ def retry_command(command, attempts = 60, delay = 30):
]
def verify_linux_DEB_packages_step(depends_on = []):
install_command = "apt-get update >/dev/null 2>&1 && DEBIAN_FRONTEND=noninteractive apt-get install -yq grafana=${TAG} >/dev/null 2>&1"
install_command = "apt-get update >/dev/null 2>&1 && DEBIAN_FRONTEND=noninteractive apt-get install -yq grafana=$version >/dev/null 2>&1"
return {
"name": "verify-linux-DEB-packages",
"image": images["ubuntu"],
"environment": {},
"commands": [
'export version=$(echo ${TAG} | sed -e "s/+security-/-/g")',
'echo "Step 1: Updating package lists..."',
"apt-get update >/dev/null 2>&1",
'echo "Step 2: Installing prerequisites..."',
@@ -1305,10 +1304,10 @@ def verify_linux_DEB_packages_step(depends_on = []):
# The packages take a bit of time to propogate within the repo. This retry will check their availability within 10 minutes.
] + retry_command(install_command) + [
'echo "Step 6: Verifying Grafana installation..."',
'if dpkg -s grafana | grep -q "Version: ${TAG}"; then',
' echo "Successfully verified Grafana version ${TAG}"',
'if dpkg -s grafana | grep -q "Version: $version"; then',
' echo "Successfully verified Grafana version $version"',
"else",
' echo "Failed to verify Grafana version ${TAG}"',
' echo "Failed to verify Grafana version $version"',
" exit 1",
"fi",
'echo "Verification complete."',
@@ -1329,7 +1328,7 @@ def verify_linux_RPM_packages_step(depends_on = []):
"sslcacert=/etc/pki/tls/certs/ca-bundle.crt\n"
)
install_command = "dnf install -y --nogpgcheck grafana-${TAG} >/dev/null 2>&1"
install_command = "dnf install -y --nogpgcheck grafana-$version >/dev/null 2>&1"
return {
"name": "verify-linux-RPM-packages",
@@ -1345,7 +1344,8 @@ def verify_linux_RPM_packages_step(depends_on = []):
'echo "Step 4: Configuring Grafana repository..."',
"echo -e '" + repo_config + "' > /etc/yum.repos.d/grafana.repo",
'echo "Step 5: Checking RPM repository..."',
"dnf list available grafana-${TAG}",
'export version=$(echo "${TAG}" | sed -e "s/+security-/^security_/g")',
"dnf list available grafana-$version",
"if [ $? -eq 0 ]; then",
' echo "Grafana package found in repository. Installing from repo..."',
] + retry_command(install_command) + [
@@ -1353,16 +1353,16 @@ def verify_linux_RPM_packages_step(depends_on = []):
" rpm --import https://rpm.grafana.com/gpg.key",
" rpm -qa gpg-pubkey* | xargs rpm -qi | grep -i grafana",
"else",
' echo "Grafana package version ${TAG} not found in repository."',
' echo "Grafana package version $version not found in repository."',
" dnf repolist",
" dnf list available grafana*",
" exit 1",
"fi",
'echo "Step 6: Verifying Grafana installation..."',
'if rpm -q grafana | grep -q "${TAG}"; then',
' echo "Successfully verified Grafana version ${TAG}"',
'if rpm -q grafana | grep -q "$verison"; then',
' echo "Successfully verified Grafana version $version"',
"else",
' echo "Failed to verify Grafana version ${TAG}"',
' echo "Failed to verify Grafana version $version"',
" exit 1",
"fi",
'echo "Verification complete."',

View File

@@ -2,7 +2,7 @@
global variables
"""
grabpl_version = "v3.0.53"
grabpl_version = "v3.0.56"
golang_version = "1.23.1"
# nodejs_version should match what's in ".nvmrc", but without the v prefix.