Mirror of https://github.com/grafana/grafana.git (synced 2025-12-20 19:44:55 +08:00)

Compare commits: docs/add-t... to v5.4.3 (60 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 653918056c |  |
|  | 194153aa62 |  |
|  | c0400f32ad |  |
|  | aaac7cc556 |  |
|  | c2e708042c |  |
|  | 17f4a03d6f |  |
|  | 5743ceaa93 |  |
|  | 0f3a938f0b |  |
|  | 4796ec8cd4 |  |
|  | a304e5b600 |  |
|  | 54b725b91b |  |
|  | ca88aedbdd |  |
|  | 566c7b17ad |  |
|  | d812109ebf |  |
|  | f44a006cb9 |  |
|  | 43c0405ae1 |  |
|  | ed05e9de72 |  |
|  | e43f13bc03 |  |
|  | 308c818cd7 |  |
|  | d49d8bf13d |  |
|  | 3701f22d66 |  |
|  | 4c60ef398a |  |
|  | ec98666de1 |  |
|  | 7fe10e2eef |  |
|  | 4a8cd4c023 |  |
|  | 174be1abab |  |
|  | 4c13e02aef |  |
|  | 4a8a3d40e7 |  |
|  | 01c4b71cfb |  |
|  | e7cd39a543 |  |
|  | 6f241a4bac |  |
|  | 0a19581c48 |  |
|  | 96cb4df83a |  |
|  | 7820775a53 |  |
|  | 9699133501 |  |
|  | 80ecd8ea8e |  |
|  | 2ab2259091 |  |
|  | deb305b95f |  |
|  | d42c17efad |  |
|  | 972aaef2a6 |  |
|  | ce3982d406 |  |
|  | 69c5191926 |  |
|  | 99ee3bbe5a |  |
|  | 01840cbd70 |  |
|  | 23b19543bd |  |
|  | bb4e5934fb |  |
|  | fd3821d2f1 |  |
|  | 8b1d0b14b6 |  |
|  | 23c6bea21b |  |
|  | e3abefa19f |  |
|  | 4ee92bd59c |  |
|  | 780e5153d0 |  |
|  | be9058d7ef |  |
|  | 3301f96811 |  |
|  | 1c59669da0 |  |
|  | 1ad60be47b |  |
|  | 9ec0af73ec |  |
|  | 8190d10827 |  |
|  | 18b5f630f7 |  |
|  | 9df26af3db |  |
@@ -19,7 +19,7 @@ version: 2
jobs:
mysql-integration-test:
docker:
- image: circleci/golang:1.11
- image: circleci/golang:1.11.4
- image: circleci/mysql:5.6-ram
environment:
MYSQL_ROOT_PASSWORD: rootpass

@@ -39,7 +39,7 @@ jobs:

postgres-integration-test:
docker:
- image: circleci/golang:1.11
- image: circleci/golang:1.11.4
- image: circleci/postgres:9.3-ram
environment:
POSTGRES_USER: grafanatest

@@ -74,7 +74,7 @@ jobs:

gometalinter:
docker:
- image: circleci/golang:1.11
- image: circleci/golang:1.11.4
environment:
# we need CGO because of go-sqlite3
CGO_ENABLED: 1

@@ -89,9 +89,6 @@ jobs:
- run: 'go get -u github.com/opennota/check/cmd/structcheck'
- run: 'go get -u github.com/mdempsky/unconvert'
- run: 'go get -u github.com/opennota/check/cmd/varcheck'
- run:
name: run linters
command: 'gometalinter --enable-gc --vendor --deadline 10m --disable-all --enable=deadcode --enable=goconst --enable=gofmt --enable=ineffassign --enable=megacheck --enable=structcheck --enable=unconvert --enable=varcheck ./...'
- run:
name: run go vet
command: 'go vet ./pkg/...'

@@ -117,7 +114,7 @@ jobs:

test-backend:
docker:
- image: circleci/golang:1.11
- image: circleci/golang:1.11.4
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout

@@ -127,7 +124,7 @@ jobs:

build-all:
docker:
- image: grafana/build-container:1.2.1
- image: grafana/build-container:1.2.2
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout

@@ -162,8 +159,8 @@ jobs:
name: Build Grafana.com master publisher
command: 'go build -o scripts/publish scripts/build/publish.go'
- run:
name: Build Grafana.com release publisher
command: 'cd scripts/build/release_publisher && go build -o release_publisher .'
name: Test and build Grafana.com release publisher
command: 'cd scripts/build/release_publisher && go test . && go build -o release_publisher .'
- persist_to_workspace:
root: .
paths:

@@ -175,7 +172,7 @@ jobs:

build:
docker:
- image: grafana/build-container:1.2.1
- image: grafana/build-container:1.2.2
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout

@@ -191,57 +188,60 @@ jobs:
- run:
name: sha-sum packages
command: 'go run build.go sha-dist'
- run:
name: Test Grafana.com release publisher
command: 'cd scripts/build/release_publisher && go test .'
- persist_to_workspace:
root: .
paths:
- dist/grafana*

grafana-docker-master:
docker:
- image: docker:stable-git
machine:
image: circleci/classic:201808-01
steps:
- checkout
- attach_workspace:
at: .
- setup_remote_docker
- run: docker info
- run: cp dist/grafana-latest.linux-x64.tar.gz packaging/docker
- run: docker run --privileged linuxkit/binfmt:v0.6
- run: cp dist/grafana-latest.linux-*.tar.gz packaging/docker
- run: cd packaging/docker && ./build-deploy.sh "master-${CIRCLE_SHA1}"
- run: rm packaging/docker/grafana-latest.linux-x64.tar.gz
- run: rm packaging/docker/grafana-latest.linux-*.tar.gz
- run: cp enterprise-dist/grafana-enterprise-*.linux-amd64.tar.gz packaging/docker/grafana-latest.linux-x64.tar.gz
- run: cd packaging/docker && ./build-enterprise.sh "master"

grafana-docker-pr:
docker:
- image: docker:stable-git
machine:
image: circleci/classic:201808-01
steps:
- checkout
- attach_workspace:
at: .
- setup_remote_docker
- run: docker info
- run: cp dist/grafana-latest.linux-x64.tar.gz packaging/docker
- run: docker run --privileged linuxkit/binfmt:v0.6
- run: cp dist/grafana-latest.linux-*.tar.gz packaging/docker
- run: cd packaging/docker && ./build.sh "${CIRCLE_SHA1}"

grafana-docker-release:
docker:
- image: docker:stable-git
steps:
- checkout
- attach_workspace:
at: .
- setup_remote_docker
- run: docker info
- run: cp dist/grafana-latest.linux-x64.tar.gz packaging/docker
- run: cd packaging/docker && ./build-deploy.sh "${CIRCLE_TAG}"
- run: rm packaging/docker/grafana-latest.linux-x64.tar.gz
- run: cp enterprise-dist/grafana-enterprise-*.linux-amd64.tar.gz packaging/docker/grafana-latest.linux-x64.tar.gz
- run: cd packaging/docker && ./build-enterprise.sh "${CIRCLE_TAG}"
machine:
image: circleci/classic:201808-01
steps:
- checkout
- attach_workspace:
at: .
- run: docker info
- run: docker run --privileged linuxkit/binfmt:v0.6
- run: cp dist/grafana-latest.linux-*.tar.gz packaging/docker
- run: cd packaging/docker && ./build-deploy.sh "${CIRCLE_TAG}"
- run: rm packaging/docker/grafana-latest.linux-*.tar.gz
- run: cp enterprise-dist/grafana-enterprise-*.linux-amd64.tar.gz packaging/docker/grafana-latest.linux-x64.tar.gz
- run: cd packaging/docker && ./build-enterprise.sh "${CIRCLE_TAG}"

build-enterprise:
docker:
- image: grafana/build-container:1.2.1
- image: grafana/build-container:1.2.2
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout

@@ -273,7 +273,7 @@ jobs:

build-all-enterprise:
docker:
- image: grafana/build-container:1.2.1
- image: grafana/build-container:1.2.2
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout

@@ -320,7 +320,7 @@ jobs:

deploy-enterprise-master:
docker:
- image: grafana/grafana-ci-deploy:1.0.0
- image: grafana/grafana-ci-deploy:1.1.0
steps:
- attach_workspace:
at: .

@@ -343,7 +343,7 @@ jobs:

deploy-enterprise-release:
docker:
- image: grafana/grafana-ci-deploy:1.0.0
- image: grafana/grafana-ci-deploy:1.1.0
steps:
- attach_workspace:
at: .

@@ -362,10 +362,20 @@ jobs:
- run:
name: Deploy to Grafana.com
command: './scripts/build/publish.sh --enterprise'
- run:
name: Load GPG private key
command: './scripts/build/load-signing-key.sh'
- run:
name: Update Debian repository
command: './scripts/build/update_repo/update-deb.sh "enterprise" "$GPG_KEY_PASSWORD" "$CIRCLE_TAG"'
- run:
name: Update RPM repository
command: './scripts/build/update_repo/update-rpm.sh "enterprise" "$GPG_KEY_PASSWORD" "$CIRCLE_TAG"'

deploy-master:
docker:
- image: grafana/grafana-ci-deploy:1.0.0
- image: grafana/grafana-ci-deploy:1.1.0
steps:
- attach_workspace:
at: .

@@ -395,8 +405,9 @@ jobs:

deploy-release:
docker:
- image: grafana/grafana-ci-deploy:1.0.0
- image: grafana/grafana-ci-deploy:1.1.0
steps:
- checkout
- attach_workspace:
at: .
- run:

@@ -414,6 +425,15 @@ jobs:
- run:
name: Deploy to Grafana.com
command: './scripts/build/publish.sh'
- run:
name: Load GPG private key
command: './scripts/build/load-signing-key.sh'
- run:
name: Update Debian repository
command: './scripts/build/update_repo/update-deb.sh "oss" "$GPG_KEY_PASSWORD" "$CIRCLE_TAG"'
- run:
name: Update RPM repository
command: './scripts/build/update_repo/update-rpm.sh "oss" "$GPG_KEY_PASSWORD" "$CIRCLE_TAG"'

workflows:
version: 2

@@ -510,6 +530,7 @@ workflows:
- grafana-docker-release:
requires:
- build-all
- build-all-enterprise
- test-backend
- test-frontend
- codespell
@@ -1,5 +1,5 @@
# Golang build container
FROM golang:1.11
FROM golang:1.11.4

WORKDIR $GOPATH/src/github.com/grafana/grafana

@@ -50,7 +50,8 @@ ENV PATH=/usr/share/grafana/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bi

WORKDIR $GF_PATHS_HOME

RUN apt-get update && apt-get install -qq -y libfontconfig ca-certificates && \
RUN apt-get update && apt-get upgrade -y && \
apt-get install -qq -y libfontconfig ca-certificates && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/*
@@ -7,7 +7,7 @@ clone_folder: c:\gopath\src\github.com\grafana\grafana
environment:
nodejs_version: "8"
GOPATH: C:\gopath
GOVERSION: 1.11
GOVERSION: 1.11.4

install:
- rmdir c:\go /s /q
build.go
@@ -164,6 +164,8 @@ func makeLatestDistCopies() {
"_amd64.deb": "dist/grafana_latest_amd64.deb",
".x86_64.rpm": "dist/grafana-latest-1.x86_64.rpm",
".linux-amd64.tar.gz": "dist/grafana-latest.linux-x64.tar.gz",
".linux-armv7.tar.gz": "dist/grafana-latest.linux-armv7.tar.gz",
".linux-arm64.tar.gz": "dist/grafana-latest.linux-arm64.tar.gz",
}

for _, file := range files {
@@ -246,6 +246,10 @@ disable_signout_menu = false
# URL to redirect the user to after sign out
signout_redirect_url =

# Set to true to attempt login with OAuth automatically, skipping the login screen.
# This setting is ignored if multiple OAuth providers are configured.
oauth_auto_login = false

#################################### Anonymous Auth ######################
[auth.anonymous]
# enable anonymous access

@@ -223,6 +223,10 @@ log_queries =
# URL to redirect the user to after sign out
;signout_redirect_url =

# Set to true to attempt login with OAuth automatically, skipping the login screen.
# This setting is ignored if multiple OAuth providers are configured.
;oauth_auto_login = false

#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
@@ -133,9 +133,9 @@ Macro example | Description
------------ | -------------
*$__time(dateColumn)* | Will be replaced by an expression to convert to a UNIX timestamp and rename the column to `time_sec`. For example, *UNIX_TIMESTAMP(dateColumn) as time_sec*
*$__timeEpoch(dateColumn)* | Will be replaced by an expression to convert to a UNIX timestamp and rename the column to `time_sec`. For example, *UNIX_TIMESTAMP(dateColumn) as time_sec*
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn BETWEEN '2017-04-21T05:01:17Z' AND '2017-04-21T05:06:17Z'*
*$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *'2017-04-21T05:01:17Z'*
*$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *'2017-04-21T05:06:17Z'*
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn BETWEEN FROM_UNIXTIME(1494410783) AND FROM_UNIXTIME(1494410983)*
*$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *FROM_UNIXTIME(1494410783)*
*$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *FROM_UNIXTIME(1494410983)*
*$__timeGroup(dateColumn,'5m')* | Will be replaced by an expression usable in GROUP BY clause. For example, *cast(cast(UNIX_TIMESTAMP(dateColumn)/(300) as signed)*300 as signed),*
*$__timeGroup(dateColumn,'5m', 0)* | Same as above but with a fill parameter so missing points in that series will be added by grafana and 0 will be used as value.
*$__timeGroup(dateColumn,'5m', NULL)* | Same as above but NULL will be used as value for missing points.
@@ -4,7 +4,7 @@
"company": "Grafana Labs"
},
"name": "grafana",
"version": "5.4.0-pre1",
"version": "5.4.3",
"repository": {
"type": "git",
"url": "http://github.com/grafana/grafana.git"
@@ -1,4 +1,5 @@
FROM debian:stretch-slim
ARG BASE_IMAGE=debian:stretch-slim
FROM ${BASE_IMAGE}

ARG GRAFANA_TGZ="grafana-latest.linux-x64.tar.gz"

@@ -10,7 +11,8 @@ COPY ${GRAFANA_TGZ} /tmp/grafana.tar.gz

RUN mkdir /tmp/grafana && tar xfvz /tmp/grafana.tar.gz --strip-components=1 -C /tmp/grafana

FROM debian:stretch-slim
ARG BASE_IMAGE=debian:stretch-slim
FROM ${BASE_IMAGE}

ARG GF_UID="472"
ARG GF_GID="472"

@@ -25,7 +27,8 @@ ENV PATH=/usr/share/grafana/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bi

WORKDIR $GF_PATHS_HOME

RUN apt-get update && apt-get install -qq -y libfontconfig ca-certificates curl && \
RUN apt-get update && apt-get -y upgrade && \
apt-get install -qq -y libfontconfig ca-certificates curl && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/*
@@ -8,6 +8,5 @@ docker login -u "$DOCKER_USER" -p "$DOCKER_PASS"
./push_to_docker_hub.sh "$_grafana_version"

if echo "$_grafana_version" | grep -q "^master-"; then
apk add --no-cache curl
./deploy_to_k8s.sh "grafana/grafana-dev:$_grafana_version"
fi
@@ -1,12 +1,25 @@
#!/bin/sh
set -e

_grafana_tag=$1
_raw_grafana_tag=$1
_docker_repo=${2:-grafana/grafana-enterprise}

if echo "$_raw_grafana_tag" | grep -q "^v"; then
_grafana_tag=$(echo "${_raw_grafana_tag}" | cut -d "v" -f 2)
else
_grafana_tag="${_raw_grafana_tag}"
fi

echo "Building and deploying ${_docker_repo}:${_grafana_tag}"

docker build \
--tag "${_docker_repo}:${_grafana_tag}"\
--no-cache=true \
.

docker push "${_docker_repo}:${_grafana_tag}"

if echo "$_raw_grafana_tag" | grep -q "^v" && echo "$_raw_grafana_tag" | grep -qv "beta"; then
docker tag "${_docker_repo}:${_grafana_tag}" "${_docker_repo}:latest"
docker push "${_docker_repo}:latest"
fi
@@ -1,25 +1,49 @@
#!/bin/sh

_grafana_tag=$1
_grafana_tag=${1:-}
_docker_repo=${2:-grafana/grafana}

# If the tag starts with v, treat this as an official release
if echo "$_grafana_tag" | grep -q "^v"; then
_grafana_version=$(echo "${_grafana_tag}" | cut -d "v" -f 2)
_docker_repo=${2:-grafana/grafana}
else
_grafana_version=$_grafana_tag
_docker_repo=${2:-grafana/grafana-dev}
fi

echo "Building ${_docker_repo}:${_grafana_version}"

docker build \
--tag "${_docker_repo}:${_grafana_version}" \
--no-cache=true .
export DOCKER_CLI_EXPERIMENTAL=enabled

# Build grafana image for a specific arch
docker_build () {
base_image=$1
grafana_tgz=$2
tag=$3

docker build \
--build-arg BASE_IMAGE=${base_image} \
--build-arg GRAFANA_TGZ=${grafana_tgz} \
--tag "${tag}" \
--no-cache=true .
}

# Tag docker images of all architectures
docker_tag_all () {
repo=$1
tag=$2
docker tag "${_docker_repo}:${_grafana_version}" "${repo}:${tag}"
docker tag "${_docker_repo}-arm32v7-linux:${_grafana_version}" "${repo}-arm32v7-linux:${tag}"
docker tag "${_docker_repo}-arm64v8-linux:${_grafana_version}" "${repo}-arm64v8-linux:${tag}"
}

docker_build "debian:stretch-slim" "grafana-latest.linux-x64.tar.gz" "${_docker_repo}:${_grafana_version}"
docker_build "arm32v7/debian:stretch-slim" "grafana-latest.linux-armv7.tar.gz" "${_docker_repo}-arm32v7-linux:${_grafana_version}"
docker_build "arm64v8/debian:stretch-slim" "grafana-latest.linux-arm64.tar.gz" "${_docker_repo}-arm64v8-linux:${_grafana_version}"

# Tag as 'latest' for official release; otherwise tag as grafana/grafana:master
if echo "$_grafana_tag" | grep -q "^v"; then
docker tag "${_docker_repo}:${_grafana_version}" "${_docker_repo}:latest"
docker_tag_all "${_docker_repo}" "latest"
else
docker tag "${_docker_repo}:${_grafana_version}" "grafana/grafana:master"
docker_tag_all "${_docker_repo}" "master"
docker tag "${_docker_repo}:${_grafana_version}" "grafana/grafana-dev:${_grafana_version}"
fi
@@ -1,24 +1,46 @@
#!/bin/sh
set -e

_grafana_tag=$1
_grafana_tag=${1:-}
_docker_repo=${2:-grafana/grafana}

# If the tag starts with v, treat this as an official release
if echo "$_grafana_tag" | grep -q "^v"; then
_grafana_version=$(echo "${_grafana_tag}" | cut -d "v" -f 2)
_docker_repo=${2:-grafana/grafana}
else
_grafana_version=$_grafana_tag
_docker_repo=${2:-grafana/grafana-dev}
fi

export DOCKER_CLI_EXPERIMENTAL=enabled

echo "pushing ${_docker_repo}:${_grafana_version}"
docker push "${_docker_repo}:${_grafana_version}"

docker_push_all () {
repo=$1
tag=$2

# Push each image individually
docker push "${repo}:${tag}"
docker push "${repo}-arm32v7-linux:${tag}"
docker push "${repo}-arm64v8-linux:${tag}"

# Create and push a multi-arch manifest
docker manifest create "${repo}:${tag}" \
"${repo}:${tag}" \
"${repo}-arm32v7-linux:${tag}" \
"${repo}-arm64v8-linux:${tag}"

docker manifest push "${repo}:${tag}"
}

if echo "$_grafana_tag" | grep -q "^v" && echo "$_grafana_tag" | grep -vq "beta"; then
echo "pushing ${_docker_repo}:latest"
docker push "${_docker_repo}:latest"
docker_push_all "${_docker_repo}" "latest"
docker_push_all "${_docker_repo}" "${_grafana_version}"
elif echo "$_grafana_tag" | grep -q "^v" && echo "$_grafana_tag" | grep -q "beta"; then
docker_push_all "${_docker_repo}" "${_grafana_version}"
elif echo "$_grafana_tag" | grep -q "master"; then
echo "pushing grafana/grafana:master"
docker push grafana/grafana:master
docker_push_all "${_docker_repo}" "master"
docker push "grafana/grafana-dev:${_grafana_version}"
fi
@@ -1,17 +1,17 @@
#! /usr/bin/env bash
version=5.0.2
version=5.4.3

wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb
# wget https://dl.grafana.com/oss/release/grafana_${version}_amd64.deb
#
# package_cloud push grafana/stable/debian/jessie grafana_${version}_amd64.deb
# package_cloud push grafana/stable/debian/wheezy grafana_${version}_amd64.deb
# package_cloud push grafana/stable/debian/stretch grafana_${version}_amd64.deb
#
# package_cloud push grafana/testing/debian/jessie grafana_${version}_amd64.deb
# package_cloud push grafana/testing/debian/wheezy grafana_${version}_amd64.deb --verbose
# package_cloud push grafana/testing/debian/stretch grafana_${version}_amd64.deb --verbose

package_cloud push grafana/stable/debian/jessie grafana_${version}_amd64.deb
package_cloud push grafana/stable/debian/wheezy grafana_${version}_amd64.deb
package_cloud push grafana/stable/debian/stretch grafana_${version}_amd64.deb

package_cloud push grafana/testing/debian/jessie grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/wheezy grafana_${version}_amd64.deb --verbose
package_cloud push grafana/testing/debian/stretch grafana_${version}_amd64.deb --verbose

wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-${version}-1.x86_64.rpm
wget https://dl.grafana.com/oss/release/grafana-${version}-1.x86_64.rpm

package_cloud push grafana/testing/el/6 grafana-${version}-1.x86_64.rpm --verbose
package_cloud push grafana/testing/el/7 grafana-${version}-1.x86_64.rpm --verbose
@@ -277,10 +277,6 @@ func PostDashboard(c *m.ReqContext, cmd m.SaveDashboardCommand) Response {
return Error(500, "Failed to save dashboard", err)
}

if err == m.ErrDashboardFailedToUpdateAlertData {
return Error(500, "Invalid alert data. Cannot save dashboard", err)
}

c.TimeRequest(metrics.M_Api_Dashboard_Save)
return JSON(200, util.DynMap{
"status": "success",
@@ -727,7 +727,6 @@ func TestDashboardApiEndpoint(t *testing.T) {
{SaveError: m.ErrDashboardTitleEmpty, ExpectedStatusCode: 400},
{SaveError: m.ErrDashboardFolderCannotHaveParent, ExpectedStatusCode: 400},
{SaveError: alerting.ValidationError{Reason: "Mu"}, ExpectedStatusCode: 422},
{SaveError: m.ErrDashboardFailedToUpdateAlertData, ExpectedStatusCode: 500},
{SaveError: m.ErrDashboardFailedGenerateUniqueUid, ExpectedStatusCode: 500},
{SaveError: m.ErrDashboardTypeMismatch, ExpectedStatusCode: 400},
{SaveError: m.ErrDashboardFolderWithSameNameAsDashboard, ExpectedStatusCode: 400},
@@ -51,7 +51,7 @@ func ApplyRoute(ctx context.Context, req *http.Request, proxyPath string, route
if token, err := tokenProvider.getAccessToken(data); err != nil {
logger.Error("Failed to get access token", "error", err)
} else {
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}

@@ -60,7 +60,7 @@ func ApplyRoute(ctx context.Context, req *http.Request, proxyPath string, route
if token, err := tokenProvider.getJwtAccessToken(ctx, data); err != nil {
logger.Error("Failed to get access token", "error", err)
} else {
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}

@@ -73,7 +73,7 @@ func ApplyRoute(ctx context.Context, req *http.Request, proxyPath string, route
if err != nil {
logger.Error("Failed to get default access token from meta data server", "error", err)
} else {
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.AccessToken))
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.AccessToken))
}
}
}

@@ -87,7 +87,7 @@ func NewApiPluginProxy(ctx *m.ReqContext, proxyPath string, route *plugins.AppPl
}

for key, value := range headers {
log.Trace("setting key %v value %v", key, value[0])
log.Trace("setting key %v value <redacted>", key)
req.Header.Set(key, value[0])
}
}
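A note on the Add-to-Set switch above: `http.Header.Add` appends a value, so a request that already carries an `Authorization` header would go out with two of them, while `Header.Set` replaces whatever is present. A minimal standard-library sketch of the difference (not Grafana code):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.com", nil)

	// Add appends: a request that already carries an Authorization
	// header ends up with two of them.
	req.Header.Add("Authorization", "Bearer old-token")
	req.Header.Add("Authorization", "Bearer new-token")
	fmt.Println(len(req.Header["Authorization"])) // 2

	// Set replaces any existing values, which is what the proxy wants
	// when it injects a freshly fetched access token.
	req.Header.Set("Authorization", "Bearer new-token")
	fmt.Println(len(req.Header["Authorization"])) // 1
}
```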
@@ -21,7 +21,6 @@ var (
ErrDashboardVersionMismatch = errors.New("The dashboard has been changed by someone else")
ErrDashboardTitleEmpty = errors.New("Dashboard title cannot be empty")
ErrDashboardFolderCannotHaveParent = errors.New("A Dashboard Folder cannot be added to another folder")
ErrDashboardFailedToUpdateAlertData = errors.New("Failed to save alert data")
ErrDashboardsWithSameSlugExists = errors.New("Multiple dashboards with the same slug exists")
ErrDashboardFailedGenerateUniqueUid = errors.New("Failed to generate unique dashboard id")
ErrDashboardTypeMismatch = errors.New("Dashboard cannot be changed to a folder")
@@ -112,7 +112,7 @@ func (e *DashAlertExtractor) getAlertFromPanels(jsonWithPanels *simplejson.Json,

frequency, err := getTimeDurationStringToSeconds(jsonAlert.Get("frequency").MustString())
if err != nil {
return nil, ValidationError{Reason: "Could not parse frequency"}
return nil, ValidationError{Reason: err.Error()}
}

rawFor := jsonAlert.Get("for").MustString()
@@ -1,16 +1,21 @@
package alerting

import (
"errors"
"fmt"
"regexp"
"strconv"
"time"

"github.com/grafana/grafana/pkg/components/simplejson"

m "github.com/grafana/grafana/pkg/models"
)

var (
ErrFrequencyCannotBeZeroOrLess = errors.New(`"evaluate every" cannot be zero or below`)
ErrFrequencyCouldNotBeParsed = errors.New(`"evaluate every" field could not be parsed`)
)

type Rule struct {
Id int64
OrgId int64

@@ -76,7 +81,7 @@ func getTimeDurationStringToSeconds(str string) (int64, error) {
matches := ValueFormatRegex.FindAllString(str, 1)

if len(matches) <= 0 {
return 0, fmt.Errorf("Frequency could not be parsed")
return 0, ErrFrequencyCouldNotBeParsed
}

value, err := strconv.Atoi(matches[0])

@@ -84,6 +89,10 @@ func getTimeDurationStringToSeconds(str string) (int64, error) {
return 0, err
}

if value == 0 {
return 0, ErrFrequencyCannotBeZeroOrLess
}

unit := UnitFormatRegex.FindAllString(str, 1)[0]

if val, ok := unitMultiplier[unit]; ok {

@@ -101,7 +110,6 @@ func NewRuleFromDBAlert(ruleDef *m.Alert) (*Rule, error) {
model.PanelId = ruleDef.PanelId
model.Name = ruleDef.Name
model.Message = ruleDef.Message
model.Frequency = ruleDef.Frequency
model.State = ruleDef.State
model.LastStateChange = ruleDef.NewStateDate
model.For = ruleDef.For

@@ -109,6 +117,13 @@ func NewRuleFromDBAlert(ruleDef *m.Alert) (*Rule, error) {
model.ExecutionErrorState = m.ExecutionErrorOption(ruleDef.Settings.Get("executionErrorState").MustString("alerting"))
model.StateChanges = ruleDef.StateChanges

model.Frequency = ruleDef.Frequency
// frequency cannot be zero since that would not execute the alert rule.
// so we fall back to 60 seconds if `Frequency` is missing
if model.Frequency == 0 {
model.Frequency = 60
}

for _, v := range ruleDef.Settings.Get("notifications").MustArray() {
jsonModel := simplejson.NewFromAny(v)
id, err := jsonModel.Get("id").Int64()
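The parser these hunks adjust splits a string such as `10s` into a numeric value and a unit suffix, then multiplies. Below is a self-contained sketch of that approach; the regexes and the unit table are assumptions that mirror what the diff implies rather than Grafana's exact definitions:

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
	"strconv"
)

var (
	errZeroOrLess    = errors.New(`"evaluate every" cannot be zero or below`)
	errNotParseable  = errors.New(`"evaluate every" field could not be parsed`)
	valueFormatRegex = regexp.MustCompile(`^\d+`) // leading digits only
	unitFormatRegex  = regexp.MustCompile(`\w{1}$`)
	unitMultiplier   = map[string]int64{"s": 1, "m": 60, "h": 3600}
)

func durationStringToSeconds(str string) (int64, error) {
	matches := valueFormatRegex.FindAllString(str, 1)
	if len(matches) == 0 {
		return 0, errNotParseable // e.g. "-1s" or ""
	}

	value, err := strconv.ParseInt(matches[0], 10, 64)
	if err != nil {
		return 0, err
	}
	if value == 0 {
		return 0, errZeroOrLess // a zero frequency would never fire
	}

	unit := unitFormatRegex.FindAllString(str, 1)[0]
	if mul, ok := unitMultiplier[unit]; ok {
		return value * mul, nil
	}
	return value, nil // unknown unit defaults to seconds, as the tests expect
}

func main() {
	for _, s := range []string{"10s", "10m", "1h", "1o", "0s", "-1s"} {
		v, err := durationStringToSeconds(s)
		fmt.Println(s, v, err)
	}
}
```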
@@ -14,6 +14,36 @@ func (f *FakeCondition) Eval(context *EvalContext) (*ConditionResult, error) {
return &ConditionResult{}, nil
}

func TestAlertRuleFrequencyParsing(t *testing.T) {
tcs := []struct {
input string
err error
result int64
}{
{input: "10s", result: 10},
{input: "10m", result: 600},
{input: "1h", result: 3600},
{input: "1o", result: 1},
{input: "0s", err: ErrFrequencyCannotBeZeroOrLess},
{input: "0m", err: ErrFrequencyCannotBeZeroOrLess},
{input: "0h", err: ErrFrequencyCannotBeZeroOrLess},
{input: "0", err: ErrFrequencyCannotBeZeroOrLess},
{input: "-1s", err: ErrFrequencyCouldNotBeParsed},
}

for _, tc := range tcs {
r, err := getTimeDurationStringToSeconds(tc.input)
if err != tc.err {
t.Errorf("expected error: '%v' got: '%v'", tc.err, err)
return
}

if r != tc.result {
t.Errorf("expected result: %d got %d", tc.result, r)
}
}
}

func TestAlertRuleModel(t *testing.T) {
Convey("Testing alert rule", t, func() {

@@ -21,26 +51,6 @@ func TestAlertRuleModel(t *testing.T) {
return &FakeCondition{}, nil
})

Convey("Can parse seconds", func() {
seconds, _ := getTimeDurationStringToSeconds("10s")
So(seconds, ShouldEqual, 10)
})

Convey("Can parse minutes", func() {
seconds, _ := getTimeDurationStringToSeconds("10m")
So(seconds, ShouldEqual, 600)
})

Convey("Can parse hours", func() {
seconds, _ := getTimeDurationStringToSeconds("1h")
So(seconds, ShouldEqual, 3600)
})

Convey("defaults to seconds", func() {
seconds, _ := getTimeDurationStringToSeconds("1o")
So(seconds, ShouldEqual, 1)
})

Convey("should return err for empty string", func() {
_, err := getTimeDurationStringToSeconds("")
So(err, ShouldNotBeNil)

@@ -89,5 +99,35 @@ func TestAlertRuleModel(t *testing.T) {
So(len(alertRule.Notifications), ShouldEqual, 2)
})
})

Convey("can construct alert rule model with invalid frequency", func() {
json := `
{
"name": "name2",
"description": "desc2",
"noDataMode": "critical",
"enabled": true,
"frequency": "0s",
"conditions": [ { "type": "test", "prop": 123 } ],
"notifications": []
}`

alertJSON, jsonErr := simplejson.NewJson([]byte(json))
So(jsonErr, ShouldBeNil)

alert := &m.Alert{
Id: 1,
OrgId: 1,
DashboardId: 1,
PanelId: 1,
Frequency: 0,

Settings: alertJSON,
}

alertRule, err := NewRuleFromDBAlert(alert)
So(err, ShouldBeNil)
So(alertRule.Frequency, ShouldEqual, 60)
})
})
}
@@ -165,7 +165,7 @@ func (dr *dashboardServiceImpl) updateAlerting(cmd *models.SaveDashboardCommand,
}

if err := bus.Dispatch(&alertCmd); err != nil {
return models.ErrDashboardFailedToUpdateAlertData
return err
}

return nil

@@ -175,7 +175,9 @@ func (dr *dashboardServiceImpl) SaveProvisionedDashboard(dto *SaveDashboardDTO,
dto.User = &models.SignedInUser{
UserId: 0,
OrgRole: models.ROLE_ADMIN,
OrgId: dto.OrgId,
}

cmd, err := dr.buildSaveDashboardCommand(dto, true, false)
if err != nil {
return nil, err
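Several hunks in this change retire the `ErrDashboardFailedToUpdateAlertData` sentinel in favor of returning the underlying error, so callers can map specific failures to specific responses. A minimal before/after sketch (function names are illustrative, not the Grafana API):

```go
package main

import (
	"errors"
	"fmt"
)

var errSaveFailed = errors.New("failed to save alert data")

// before: the real cause is swallowed and replaced by one sentinel.
func updateAlertingBefore(dispatch func() error) error {
	if err := dispatch(); err != nil {
		return errSaveFailed
	}
	return nil
}

// after: the original error flows up, so API handlers can map specific
// failures (e.g. a validation error) to specific status codes.
func updateAlertingAfter(dispatch func() error) error {
	return dispatch()
}

func main() {
	cause := errors.New("could not parse frequency")
	fmt.Println(updateAlertingBefore(func() error { return cause })) // failed to save alert data
	fmt.Println(updateAlertingAfter(func() error { return cause }))  // could not parse frequency
}
```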
@@ -29,18 +29,22 @@ import (

// MysqlStore represents a mysql session store implementation.
type MysqlStore struct {
c *sql.DB
sid string
lock sync.RWMutex
data map[interface{}]interface{}
c *sql.DB
sid string
lock sync.RWMutex
data map[interface{}]interface{}
expiry int64
dirty bool
}

// NewMysqlStore creates and returns a mysql session store.
func NewMysqlStore(c *sql.DB, sid string, kv map[interface{}]interface{}) *MysqlStore {
func NewMysqlStore(c *sql.DB, sid string, kv map[interface{}]interface{}, expiry int64) *MysqlStore {
return &MysqlStore{
c: c,
sid: sid,
data: kv,
c: c,
sid: sid,
data: kv,
expiry: expiry,
dirty: false,
}
}

@@ -50,6 +54,7 @@ func (s *MysqlStore) Set(key, val interface{}) error {
defer s.lock.Unlock()

s.data[key] = val
s.dirty = true
return nil
}

@@ -67,6 +72,7 @@ func (s *MysqlStore) Delete(key interface{}) error {
defer s.lock.Unlock()

delete(s.data, key)
s.dirty = true
return nil
}

@@ -77,13 +83,20 @@ func (s *MysqlStore) ID() string {

// Release releases resource and save data to provider.
func (s *MysqlStore) Release() error {
newExpiry := time.Now().Unix()
if !s.dirty && (s.expiry+60) >= newExpiry {
return nil
}

data, err := session.EncodeGob(s.data)
if err != nil {
return err
}

_, err = s.c.Exec("UPDATE session SET data=?, expiry=? WHERE `key`=?",
data, time.Now().Unix(), s.sid)
data, newExpiry, s.sid)
s.dirty = false
s.expiry = newExpiry
return err
}

@@ -93,6 +106,7 @@ func (s *MysqlStore) Flush() error {
defer s.lock.Unlock()

s.data = make(map[interface{}]interface{})
s.dirty = true
return nil
}

@@ -117,11 +131,12 @@ func (p *MysqlProvider) Init(expire int64, connStr string) (err error) {

// Read returns raw session store by session ID.
func (p *MysqlProvider) Read(sid string) (session.RawStore, error) {
expiry := time.Now().Unix()
var data []byte
err := p.c.QueryRow("SELECT data FROM session WHERE `key`=?", sid).Scan(&data)
err := p.c.QueryRow("SELECT data,expiry FROM session WHERE `key`=?", sid).Scan(&data, &expiry)
if err == sql.ErrNoRows {
_, err = p.c.Exec("INSERT INTO session(`key`,data,expiry) VALUES(?,?,?)",
sid, "", time.Now().Unix())
sid, "", expiry)
}
if err != nil {
return nil, err

@@ -137,7 +152,7 @@ func (p *MysqlProvider) Read(sid string) (session.RawStore, error) {
}
}

return NewMysqlStore(p.c, sid, kv), nil
return NewMysqlStore(p.c, sid, kv, expiry), nil
}

// Exist returns true if session with given ID exists.
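The new `dirty`/`expiry` bookkeeping means `Release` can skip the per-request `UPDATE session ...` when nothing changed and the stored expiry is less than 60 seconds stale. A condensed sketch of just that guard, with an assumed `persist` callback standing in for the SQL write:

```go
package main

import (
	"fmt"
	"time"
)

type sessionStore struct {
	dirty  bool
	expiry int64 // unix seconds of the last persisted expiry
}

// release persists the session only when needed: either the data changed,
// or the stored expiry is more than 60s stale and must be refreshed.
func (s *sessionStore) release(persist func(expiry int64) error) error {
	now := time.Now().Unix()
	if !s.dirty && s.expiry+60 >= now {
		return nil // nothing to write; saves one UPDATE per request
	}
	if err := persist(now); err != nil {
		return err
	}
	s.dirty = false
	s.expiry = now
	return nil
}

func main() {
	s := &sessionStore{expiry: time.Now().Unix()}
	_ = s.release(func(int64) error { fmt.Println("UPDATE session ..."); return nil }) // skipped
	s.dirty = true
	_ = s.release(func(int64) error { fmt.Println("UPDATE session ..."); return nil }) // writes
}
```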
@@ -53,14 +53,14 @@ func GetDataSourceByName(query *m.GetDataSourceByNameQuery) error {
}

func GetDataSources(query *m.GetDataSourcesQuery) error {
sess := x.Limit(1000, 0).Where("org_id=?", query.OrgId).Asc("name")
sess := x.Limit(5000, 0).Where("org_id=?", query.OrgId).Asc("name")

query.Result = make([]*m.DataSource, 0)
return sess.Find(&query.Result)
}

func GetAllDataSources(query *m.GetAllDataSourcesQuery) error {
sess := x.Limit(1000, 0).Asc("name")
sess := x.Limit(5000, 0).Asc("name")

query.Result = make([]*m.DataSource, 0)
return sess.Find(&query.Result)
@@ -99,14 +99,14 @@ func UpdateOrgQuota(cmd *m.UpdateOrgQuotaCmd) error {
return inTransaction(func(sess *DBSession) error {
//Check if quota is already defined in the DB
quota := m.Quota{
Target: cmd.Target,
OrgId: cmd.OrgId,
Updated: time.Now(),
Target: cmd.Target,
OrgId: cmd.OrgId,
}
has, err := sess.Get(&quota)
if err != nil {
return err
}
quota.Updated = time.Now()
quota.Limit = cmd.Limit
if !has {
quota.Created = time.Now()

@@ -201,14 +201,14 @@ func UpdateUserQuota(cmd *m.UpdateUserQuotaCmd) error {
return inTransaction(func(sess *DBSession) error {
//Check if quota is already defined in the DB
quota := m.Quota{
Target: cmd.Target,
UserId: cmd.UserId,
Updated: time.Now(),
Target: cmd.Target,
UserId: cmd.UserId,
}
has, err := sess.Get(&quota)
if err != nil {
return err
}
quota.Updated = time.Now()
quota.Limit = cmd.Limit
if !has {
quota.Created = time.Now()
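The underlying bug (grafana/grafana#14342) is an xorm pitfall: `Get` builds its WHERE clause from every non-zero field of the bean, so populating `Updated: time.Now()` before the lookup meant the existing quota row was never matched on repeated updates. A hedged sketch of the corrected ordering, with illustrative types and an xorm-style session:

```go
package quotasketch

import (
	"time"

	"github.com/go-xorm/xorm"
)

// Quota mirrors the shape of the table; field names are illustrative.
type Quota struct {
	Target  string
	OrgId   int64
	Limit   int64
	Created time.Time
	Updated time.Time
}

// updateOrgQuota shows the fixed ordering: xorm's Get() derives its WHERE
// clause from the bean's non-zero fields, so Updated must not be populated
// before the lookup (that was the bug being fixed here).
func updateOrgQuota(sess *xorm.Session, target string, orgID, limit int64) error {
	quota := Quota{Target: target, OrgId: orgID} // lookup key only

	has, err := sess.Get(&quota) // matches on target + org, nothing else
	if err != nil {
		return err
	}

	quota.Updated = time.Now() // mutate only after the lookup
	quota.Limit = limit
	if !has {
		quota.Created = time.Now()
		_, err = sess.Insert(&quota)
		return err
	}
	_, err = sess.Update(&quota, &Quota{Target: target, OrgId: orgID})
	return err
}
```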
@@ -2,6 +2,7 @@ package sqlstore

import (
"testing"
"time"

m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/setting"

@@ -168,5 +169,69 @@ func TestQuotaCommandsAndQueries(t *testing.T) {
So(query.Result.Limit, ShouldEqual, 5)
So(query.Result.Used, ShouldEqual, 1)
})

// related: https://github.com/grafana/grafana/issues/14342
Convey("Should update org quota successfully even if called multiple times", func() {
orgCmd := m.UpdateOrgQuotaCmd{
OrgId: orgId,
Target: "org_user",
Limit: 5,
}
err := UpdateOrgQuota(&orgCmd)
So(err, ShouldBeNil)

query := m.GetOrgQuotaByTargetQuery{OrgId: orgId, Target: "org_user", Default: 1}
err = GetOrgQuotaByTarget(&query)
So(err, ShouldBeNil)
So(query.Result.Limit, ShouldEqual, 5)

// XXX: resolution of the `Updated` column is 1s, so add a delay
time.Sleep(1 * time.Second)

orgCmd = m.UpdateOrgQuotaCmd{
OrgId: orgId,
Target: "org_user",
Limit: 10,
}
err = UpdateOrgQuota(&orgCmd)
So(err, ShouldBeNil)

query = m.GetOrgQuotaByTargetQuery{OrgId: orgId, Target: "org_user", Default: 1}
err = GetOrgQuotaByTarget(&query)
So(err, ShouldBeNil)
So(query.Result.Limit, ShouldEqual, 10)
})

// related: https://github.com/grafana/grafana/issues/14342
Convey("Should update user quota successfully even if called multiple times", func() {
userQuotaCmd := m.UpdateUserQuotaCmd{
UserId: userId,
Target: "org_user",
Limit: 5,
}
err := UpdateUserQuota(&userQuotaCmd)
So(err, ShouldBeNil)

query := m.GetUserQuotaByTargetQuery{UserId: userId, Target: "org_user", Default: 1}
err = GetUserQuotaByTarget(&query)
So(err, ShouldBeNil)
So(query.Result.Limit, ShouldEqual, 5)

// XXX: resolution of the `Updated` column is 1s, so add a delay
time.Sleep(1 * time.Second)

userQuotaCmd = m.UpdateUserQuotaCmd{
UserId: userId,
Target: "org_user",
Limit: 10,
}
err = UpdateUserQuota(&userQuotaCmd)
So(err, ShouldBeNil)

query = m.GetUserQuotaByTargetQuery{UserId: userId, Target: "org_user", Default: 1}
err = GetUserQuotaByTarget(&query)
So(err, ShouldBeNil)
So(query.Result.Limit, ShouldEqual, 10)
})
})
}
@@ -345,8 +345,12 @@ func GetUserOrgList(query *m.GetUserOrgListQuery) error {
return err
}

func newSignedInUserCacheKey(orgID, userID int64) string {
return fmt.Sprintf("signed-in-user-%d-%d", userID, orgID)
}

func (ss *SqlStore) GetSignedInUserWithCache(query *m.GetSignedInUserQuery) error {
cacheKey := fmt.Sprintf("signed-in-user-%d-%d", query.UserId, query.OrgId)
cacheKey := newSignedInUserCacheKey(query.OrgId, query.UserId)
if cached, found := ss.CacheService.Get(cacheKey); found {
query.Result = cached.(*m.SignedInUser)
return nil

@@ -357,6 +361,7 @@ func (ss *SqlStore) GetSignedInUserWithCache(query *m.GetSignedInUserQuery) erro
return err
}

cacheKey = newSignedInUserCacheKey(query.Result.OrgId, query.UserId)
ss.CacheService.Set(cacheKey, query.Result, time.Second*5)
return nil
}
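Recomputing the cache key before `Set` matters because a query may arrive with `OrgId == 0`, meaning "whatever org the user is currently using": the miss is looked up under 0, but the resolved result should be cached under the user's actual org id. A small sketch of the pattern, with a plain map standing in for the cache service:

```go
package main

import "fmt"

type signedInUser struct{ OrgID, UserID int64 }

func cacheKey(orgID, userID int64) string {
	return fmt.Sprintf("signed-in-user-%d-%d", userID, orgID)
}

func main() {
	cache := map[string]*signedInUser{}

	// Request with orgID=0: "whatever org the user is currently using".
	reqOrg, userID := int64(0), int64(42)
	if _, ok := cache[cacheKey(reqOrg, userID)]; !ok {
		// The DB resolves the user's active org (assume org 7 here).
		result := &signedInUser{OrgID: 7, UserID: userID}
		// Cache under the *resolved* org, not the literal 0 from the query,
		// so a later explicit org-7 request hits this entry.
		cache[cacheKey(result.OrgID, userID)] = result
	}

	_, hit := cache[cacheKey(7, userID)]
	fmt.Println("explicit org-7 lookup hits:", hit) // true
}
```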
@@ -13,7 +13,7 @@ import (
func TestUserDataAccess(t *testing.T) {

Convey("Testing DB", t, func() {
InitTestDB(t)
ss := InitTestDB(t)

Convey("Creating a user", func() {
cmd := &m.CreateUserCommand{

@@ -153,6 +153,27 @@ func TestUserDataAccess(t *testing.T) {
So(prefsQuery.Result.UserId, ShouldEqual, 0)
})
})

Convey("when retrieving signed in user for orgId=0 result should return active org id", func() {
ss.CacheService.Flush()

query := &m.GetSignedInUserQuery{OrgId: users[1].OrgId, UserId: users[1].Id}
err := ss.GetSignedInUserWithCache(query)
So(err, ShouldBeNil)
So(query.Result, ShouldNotBeNil)
So(query.OrgId, ShouldEqual, users[1].OrgId)
err = SetUsingOrg(&m.SetUsingOrgCommand{UserId: users[1].Id, OrgId: users[0].OrgId})
So(err, ShouldBeNil)
query = &m.GetSignedInUserQuery{OrgId: 0, UserId: users[1].Id}
err = ss.GetSignedInUserWithCache(query)
So(err, ShouldBeNil)
So(query.Result, ShouldNotBeNil)
So(query.Result.OrgId, ShouldEqual, users[0].OrgId)

cacheKey := newSignedInUserCacheKey(query.Result.OrgId, query.UserId)
_, found := ss.CacheService.Get(cacheKey)
So(found, ShouldBeTrue)
})
})
})
})
@@ -126,6 +126,18 @@ func (e *CloudWatchExecutor) executeTimeSeriesQuery(ctx context.Context, queryCo
}

eg.Go(func() error {
defer func() {
if err := recover(); err != nil {
plog.Error("Execute Query Panic", "error", err, "stack", log.Stack(1))
if theErr, ok := err.(error); ok {
resultChan <- &tsdb.QueryResult{
RefId: query.RefId,
Error: theErr,
}
}
}
}()

queryRes, err := e.executeQuery(ectx, query, queryContext)
if ae, ok := err.(awserr.Error); ok && ae.Code() == "500" {
return err

@@ -146,6 +158,17 @@ func (e *CloudWatchExecutor) executeTimeSeriesQuery(ctx context.Context, queryCo
for region, getMetricDataQuery := range getMetricDataQueries {
q := getMetricDataQuery
eg.Go(func() error {
defer func() {
if err := recover(); err != nil {
plog.Error("Execute Get Metric Data Query Panic", "error", err, "stack", log.Stack(1))
if theErr, ok := err.(error); ok {
resultChan <- &tsdb.QueryResult{
Error: theErr,
}
}
}
}()

queryResponses, err := e.executeGetMetricDataQuery(ectx, region, q, queryContext)
if ae, ok := err.(awserr.Error); ok && ae.Code() == "500" {
return err

@@ -188,8 +211,8 @@ func (e *CloudWatchExecutor) executeQuery(ctx context.Context, query *CloudWatch
return nil, err
}

if endTime.Before(startTime) {
return nil, fmt.Errorf("Invalid time range: End time can't be before start time")
if !startTime.Before(endTime) {
return nil, fmt.Errorf("Invalid time range: Start time must be before end time")
}

params := &cloudwatch.GetMetricStatisticsInput{
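The deferred `recover` blocks protect the whole query batch: a panic inside one errgroup goroutine would otherwise crash the process rather than fail a single query. A minimal sketch of the pattern using golang.org/x/sync/errgroup (the result type is illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

type queryResult struct {
	RefID string
	Err   error
}

func main() {
	var eg errgroup.Group
	resultChan := make(chan *queryResult, 1)

	eg.Go(func() error {
		// Convert a panic into an error result for this one query,
		// instead of tearing down every in-flight query.
		defer func() {
			if r := recover(); r != nil {
				if err, ok := r.(error); ok {
					resultChan <- &queryResult{RefID: "A", Err: err}
				}
			}
		}()

		panic(fmt.Errorf("boom")) // stand-in for a datasource bug
	})

	_ = eg.Wait() // no error returned: the panic was absorbed
	close(resultChan)
	for res := range resultChan {
		fmt.Println(res.RefID, res.Err) // A boom
	}
}
```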
@@ -1,9 +1,13 @@
package cloudwatch

import (
"context"
"testing"
"time"

"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/tsdb"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/grafana/grafana/pkg/components/null"

@@ -14,6 +18,24 @@ import (
func TestCloudWatch(t *testing.T) {
Convey("CloudWatch", t, func() {

Convey("executeQuery", func() {
e := &CloudWatchExecutor{
DataSource: &models.DataSource{
JsonData: simplejson.New(),
},
}

Convey("End time before start time should result in error", func() {
_, err := e.executeQuery(context.Background(), &CloudWatchQuery{}, &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange("now-1h", "now-2h")})
So(err.Error(), ShouldEqual, "Invalid time range: Start time must be before end time")
})

Convey("End time equals start time should result in error", func() {
_, err := e.executeQuery(context.Background(), &CloudWatchQuery{}, &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange("now-1h", "now-1h")})
So(err.Error(), ShouldEqual, "Invalid time range: Start time must be before end time")
})
})

Convey("can parse cloudwatch json model", func() {
json := `
{
@@ -47,6 +47,7 @@ func init() {
"AWS/CloudFront": {"Requests", "BytesDownloaded", "BytesUploaded", "TotalErrorRate", "4xxErrorRate", "5xxErrorRate"},
"AWS/CloudSearch": {"SuccessfulRequests", "SearchableDocuments", "IndexUtilization", "Partitions"},
"AWS/CloudHSM": {"HsmUnhealthy", "HsmTemperature", "HsmKeysSessionOccupied", "HsmKeysTokenOccupied", "HsmSslCtxsOccupied", "HsmSessionCount", "HsmUsersAvailable", "HsmUsersMax", "InterfaceEth2OctetsInput", "InterfaceEth2OctetsOutput"},
"AWS/CodeBuild": {"BuildDuration", "Builds", "DownloadSourceDuration", "Duration", "FailedBuilds", "FinalizingDuration", "InstallDuration", "PostBuildDuration", "PreBuildDuration", "ProvisioningDuration", "QueuedDuration", "SubmittedDuration", "SucceededBuilds", "UploadArtifactsDuration"},
"AWS/Connect": {"CallsBreachingConcurrencyQuota", "CallBackNotDialableNumber", "CallRecordingUploadError", "CallsPerInterval", "ConcurrentCalls", "ConcurrentCallsPercentage", "ContactFlowErrors", "ContactFlowFatalErrors", "LongestQueueWaitTime", "MissedCalls", "MisconfiguredPhoneNumbers", "PublicSigningKeyUsage", "QueueCapacityExceededError", "QueueSize", "ThrottledCalls", "ToInstancePacketLossRate"},
"AWS/DMS": {"FreeableMemory", "WriteIOPS", "ReadIOPS", "WriteThroughput", "ReadThroughput", "WriteLatency", "ReadLatency", "SwapUsage", "NetworkTransmitThroughput", "NetworkReceiveThroughput", "FullLoadThroughputBandwidthSource", "FullLoadThroughputBandwidthTarget", "FullLoadThroughputRowsSource", "FullLoadThroughputRowsTarget", "CDCIncomingChanges", "CDCChangesMemorySource", "CDCChangesMemoryTarget", "CDCChangesDiskSource", "CDCChangesDiskTarget", "CDCThroughputBandwidthTarget", "CDCThroughputRowsSource", "CDCThroughputRowsTarget", "CDCLatencySource", "CDCLatencyTarget"},
"AWS/DX": {"ConnectionState", "ConnectionBpsEgress", "ConnectionBpsIngress", "ConnectionPpsEgress", "ConnectionPpsIngress", "ConnectionCRCErrorCount", "ConnectionLightLevelTx", "ConnectionLightLevelRx"},

@@ -100,7 +101,7 @@ func init() {
"AWS/RDS": {"ActiveTransactions", "AuroraBinlogReplicaLag", "AuroraReplicaLag", "AuroraReplicaLagMaximum", "AuroraReplicaLagMinimum", "BinLogDiskUsage", "BlockedTransactions", "BufferCacheHitRatio", "BurstBalance", "CommitLatency", "CommitThroughput", "BinLogDiskUsage", "CPUCreditBalance", "CPUCreditUsage", "CPUUtilization", "DatabaseConnections", "DDLLatency", "DDLThroughput", "Deadlocks", "DeleteLatency", "DeleteThroughput", "DiskQueueDepth", "DMLLatency", "DMLThroughput", "EngineUptime", "FailedSqlStatements", "FreeableMemory", "FreeLocalStorage", "FreeStorageSpace", "InsertLatency", "InsertThroughput", "LoginFailures", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "NetworkThroughput", "Queries", "ReadIOPS", "ReadLatency", "ReadThroughput", "ReplicaLag", "ResultSetCacheHitRatio", "SelectLatency", "SelectThroughput", "SwapUsage", "TotalConnections", "UpdateLatency", "UpdateThroughput", "VolumeBytesUsed", "VolumeReadIOPS", "VolumeWriteIOPS", "WriteIOPS", "WriteLatency", "WriteThroughput"},
"AWS/Route53": {"ChildHealthCheckHealthyCount", "HealthCheckStatus", "HealthCheckPercentageHealthy", "ConnectionTime", "SSLHandshakeTime", "TimeToFirstByte"},
"AWS/S3": {"BucketSizeBytes", "NumberOfObjects", "AllRequests", "GetRequests", "PutRequests", "DeleteRequests", "HeadRequests", "PostRequests", "ListRequests", "BytesDownloaded", "BytesUploaded", "4xxErrors", "5xxErrors", "FirstByteLatency", "TotalRequestLatency"},
"AWS/SES": {"Bounce", "Complaint", "Delivery", "Reject", "Send"},
"AWS/SES": {"Bounce", "Complaint", "Delivery", "Reject", "Send", "Reputation.BounceRate", "Reputation.ComplaintRate"},
"AWS/SNS": {"NumberOfMessagesPublished", "PublishSize", "NumberOfNotificationsDelivered", "NumberOfNotificationsFailed"},
"AWS/SQS": {"NumberOfMessagesSent", "SentMessageSize", "NumberOfMessagesReceived", "NumberOfEmptyReceives", "NumberOfMessagesDeleted", "ApproximateAgeOfOldestMessage", "ApproximateNumberOfMessagesDelayed", "ApproximateNumberOfMessagesVisible", "ApproximateNumberOfMessagesNotVisible"},
"AWS/States": {"ExecutionTime", "ExecutionThrottled", "ExecutionsAborted", "ExecutionsFailed", "ExecutionsStarted", "ExecutionsSucceeded", "ExecutionsTimedOut", "ActivityRunTime", "ActivityScheduleTime", "ActivityTime", "ActivitiesFailed", "ActivitiesHeartbeatTimedOut", "ActivitiesScheduled", "ActivitiesScheduled", "ActivitiesSucceeded", "ActivitiesTimedOut", "LambdaFunctionRunTime", "LambdaFunctionScheduleTime", "LambdaFunctionTime", "LambdaFunctionsFailed", "LambdaFunctionsHeartbeatTimedOut", "LambdaFunctionsScheduled", "LambdaFunctionsStarted", "LambdaFunctionsSucceeded", "LambdaFunctionsTimedOut"},

@@ -123,6 +124,7 @@ func init() {
"AWS/CloudFront": {"DistributionId", "Region"},
"AWS/CloudSearch": {},
"AWS/CloudHSM": {"Region", "ClusterId", "HsmId"},
"AWS/CodeBuild": {"ProjectName"},
"AWS/Connect": {"InstanceId", "MetricGroup", "Participant", "QueueName", "Stream Type", "Type of Connection"},
"AWS/DMS": {"ReplicationInstanceIdentifier", "ReplicationTaskIdentifier"},
"AWS/DX": {"ConnectionId"},
@@ -65,7 +65,7 @@ var NewClient = func(ctx context.Context, ds *models.DataSource, timeRange *tsdb
clientLog.Debug("Creating new client", "version", version, "timeField", timeField, "indices", strings.Join(indices, ", "))

switch version {
case 2, 5, 56:
case 2, 5, 56, 60:
return &baseClientImpl{
ctx: ctx,
ds: ds,
@@ -90,6 +90,19 @@ func TestClient(t *testing.T) {
So(err, ShouldBeNil)
So(c.GetVersion(), ShouldEqual, 56)
})

Convey("When version 60 should return v6.0 client", func() {
ds := &models.DataSource{
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 60,
"timeField": "@timestamp",
}),
}

c, err := NewClient(context.Background(), ds, nil)
So(err, ShouldBeNil)
So(c.GetVersion(), ShouldEqual, 60)
})
})

Convey("Given a fake http client", func() {

@@ -153,8 +166,6 @@ func TestClient(t *testing.T) {
jBody, err := simplejson.NewJson(bodyBytes)
So(err, ShouldBeNil)

fmt.Println("body", string(headerBytes))

So(jHeader.Get("index").MustString(), ShouldEqual, "metrics-2018.05.15")
So(jHeader.Get("ignore_unavailable").MustBool(false), ShouldEqual, true)
So(jHeader.Get("search_type").MustString(), ShouldEqual, "count")

@@ -209,8 +220,6 @@ func TestClient(t *testing.T) {
jBody, err := simplejson.NewJson(bodyBytes)
So(err, ShouldBeNil)

fmt.Println("body", string(headerBytes))

So(jHeader.Get("index").MustString(), ShouldEqual, "metrics-2018.05.15")
So(jHeader.Get("ignore_unavailable").MustBool(false), ShouldEqual, true)
So(jHeader.Get("search_type").MustString(), ShouldEqual, "query_then_fetch")

@@ -265,8 +274,6 @@ func TestClient(t *testing.T) {
jBody, err := simplejson.NewJson(bodyBytes)
So(err, ShouldBeNil)

fmt.Println("body", string(headerBytes))

So(jHeader.Get("index").MustString(), ShouldEqual, "metrics-2018.05.15")
So(jHeader.Get("ignore_unavailable").MustBool(false), ShouldEqual, true)
So(jHeader.Get("search_type").MustString(), ShouldEqual, "query_then_fetch")
@@ -32,6 +32,7 @@ func init() {
renders["median"] = QueryDefinition{Renderer: functionRenderer}
renders["sum"] = QueryDefinition{Renderer: functionRenderer}
renders["mode"] = QueryDefinition{Renderer: functionRenderer}
renders["cumulative_sum"] = QueryDefinition{Renderer: functionRenderer}

renders["holt_winters"] = QueryDefinition{
Renderer: functionRenderer,
@@ -23,6 +23,7 @@ func TestInfluxdbQueryPart(t *testing.T) {
{mode: "alias", params: []string{"test"}, input: "mean(value)", expected: `mean(value) AS "test"`},
{mode: "count", params: []string{}, input: "distinct(value)", expected: `count(distinct(value))`},
{mode: "mode", params: []string{}, input: "value", expected: `mode(value)`},
{mode: "cumulative_sum", params: []string{}, input: "mean(value)", expected: `cumulative_sum(mean(value))`},
}

queryContext := &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange("5m", "now")}
@@ -66,6 +66,10 @@ func (m *msSqlMacroEngine) evaluateMacro(name string, args []string) (string, er
}

return fmt.Sprintf("%s BETWEEN '%s' AND '%s'", args[0], m.timeRange.GetFromAsTimeUTC().Format(time.RFC3339), m.timeRange.GetToAsTimeUTC().Format(time.RFC3339)), nil
case "__timeFrom":
return fmt.Sprintf("'%s'", m.timeRange.GetFromAsTimeUTC().Format(time.RFC3339)), nil
case "__timeTo":
return fmt.Sprintf("'%s'", m.timeRange.GetToAsTimeUTC().Format(time.RFC3339)), nil
case "__timeGroup":
if len(args) < 2 {
return "", fmt.Errorf("macro %v needs time column and interval", name)
@@ -52,6 +52,20 @@ func TestMacroEngine(t *testing.T) {
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column BETWEEN '%s' AND '%s'", from.Format(time.RFC3339), to.Format(time.RFC3339)))
})

Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select '2018-04-12T18:00:00Z'")
})

Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, "select '2018-04-12T18:05:00Z'")
})

Convey("interpolate __timeGroup function", func() {
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
So(err, ShouldBeNil)
@@ -61,6 +61,10 @@ func (m *mySqlMacroEngine) evaluateMacro(name string, args []string) (string, er
}

return fmt.Sprintf("%s BETWEEN FROM_UNIXTIME(%d) AND FROM_UNIXTIME(%d)", args[0], m.timeRange.GetFromAsSecondsEpoch(), m.timeRange.GetToAsSecondsEpoch()), nil
case "__timeFrom":
return fmt.Sprintf("FROM_UNIXTIME(%d)", m.timeRange.GetFromAsSecondsEpoch()), nil
case "__timeTo":
return fmt.Sprintf("FROM_UNIXTIME(%d)", m.timeRange.GetToAsSecondsEpoch()), nil
case "__timeGroup":
if len(args) < 2 {
return "", fmt.Errorf("macro %v needs time column and interval", name)
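With these two cases, `$__timeFrom()`/`$__timeTo()` expand to `FROM_UNIXTIME(<epoch>)` so MySQL compares against the column's native type instead of an RFC3339 string literal. A toy expander showing just the substitution (the time-range type is an assumption standing in for Grafana's TimeRange):

```go
package main

import (
	"fmt"
	"time"
)

type timeRange struct{ from, to time.Time }

// evaluateMacro mirrors the switch in the hunk above for the two new cases.
func evaluateMacro(name string, tr timeRange) (string, error) {
	switch name {
	case "__timeFrom":
		return fmt.Sprintf("FROM_UNIXTIME(%d)", tr.from.Unix()), nil
	case "__timeTo":
		return fmt.Sprintf("FROM_UNIXTIME(%d)", tr.to.Unix()), nil
	default:
		return "", fmt.Errorf("unknown macro %q", name)
	}
}

func main() {
	tr := timeRange{
		from: time.Unix(1521118500, 0),
		to:   time.Unix(1521118800, 0),
	}
	from, _ := evaluateMacro("__timeFrom", tr)
	to, _ := evaluateMacro("__timeTo", tr)
	// Prints the shape asserted by the MySQL test below:
	// WHERE time > FROM_UNIXTIME(1521118500) AND time < FROM_UNIXTIME(1521118800)
	fmt.Printf("WHERE time > %s AND time < %s\n", from, to)
}
```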
@@ -63,6 +63,20 @@ func TestMacroEngine(t *testing.T) {
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column BETWEEN FROM_UNIXTIME(%d) AND FROM_UNIXTIME(%d)", from.Unix(), to.Unix()))
})

Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", from.Unix()))
})

Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo()")
So(err, ShouldBeNil)

So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", to.Unix()))
})

Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
So(err, ShouldBeNil)
@@ -761,7 +761,7 @@ func TestMySQL(t *testing.T) {
|
||||
{
|
||||
DataSource: &models.DataSource{JsonData: simplejson.New()},
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": `SELECT time FROM metric_values WHERE time > $__timeFrom() OR time < $__timeFrom() OR 1 < $__unixEpochFrom() OR $__unixEpochTo() > 1 ORDER BY 1`,
|
||||
"rawSql": `SELECT time FROM metric_values WHERE time > $__timeFrom() OR time < $__timeTo() OR 1 < $__unixEpochFrom() OR $__unixEpochTo() > 1 ORDER BY 1`,
|
||||
"format": "time_series",
|
||||
}),
|
||||
RefId: "A",
|
||||
@@ -773,7 +773,7 @@ func TestMySQL(t *testing.T) {
|
||||
So(err, ShouldBeNil)
|
||||
queryResult := resp.Results["A"]
|
||||
So(queryResult.Error, ShouldBeNil)
|
||||
So(queryResult.Meta.Get("sql").MustString(), ShouldEqual, "SELECT time FROM metric_values WHERE time > '2018-03-15T12:55:00Z' OR time < '2018-03-15T12:55:00Z' OR 1 < 1521118500 OR 1521118800 > 1 ORDER BY 1")
|
||||
So(queryResult.Meta.Get("sql").MustString(), ShouldEqual, "SELECT time FROM metric_values WHERE time > FROM_UNIXTIME(1521118500) OR time < FROM_UNIXTIME(1521118800) OR 1 < 1521118500 OR 1521118800 > 1 ORDER BY 1")
|
||||
|
||||
})
|
||||
|
||||
|
||||
@@ -87,6 +87,10 @@ func (m *postgresMacroEngine) evaluateMacro(name string, args []string) (string,
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s BETWEEN '%s' AND '%s'", args[0], m.timeRange.GetFromAsTimeUTC().Format(time.RFC3339), m.timeRange.GetToAsTimeUTC().Format(time.RFC3339)), nil
|
||||
case "__timeFrom":
|
||||
return fmt.Sprintf("'%s'", m.timeRange.GetFromAsTimeUTC().Format(time.RFC3339)), nil
|
||||
case "__timeTo":
|
||||
return fmt.Sprintf("'%s'", m.timeRange.GetToAsTimeUTC().Format(time.RFC3339)), nil
|
||||
case "__timeGroup":
|
||||
if len(args) < 2 {
|
||||
return "", fmt.Errorf("macro %v needs time column and interval and optional fill value", name)
|
||||
|
||||
@@ -44,6 +44,20 @@ func TestMacroEngine(t *testing.T) {
|
||||
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column BETWEEN '%s' AND '%s'", from.Format(time.RFC3339), to.Format(time.RFC3339)))
|
||||
})
|
||||
|
||||
Convey("interpolate __timeFrom function", func() {
|
||||
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select '2018-04-12T18:00:00Z'")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeTo function", func() {
|
||||
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select '2018-04-12T18:05:00Z'")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeGroup function pre 5.3 compatibility", func() {
|
||||
|
||||
sql, err := engine.Interpolate(query, timeRange, "SELECT $__timeGroup(time_column,'5m'), value")
|
||||
|
||||
@@ -196,8 +196,6 @@ var Interpolate = func(query *Query, timeRange *TimeRange, sql string) (string,
|
||||
|
||||
sql = strings.Replace(sql, "$__interval_ms", strconv.FormatInt(interval.Milliseconds(), 10), -1)
|
||||
sql = strings.Replace(sql, "$__interval", interval.Text, -1)
|
||||
sql = strings.Replace(sql, "$__timeFrom()", fmt.Sprintf("'%s'", timeRange.GetFromAsTimeUTC().Format(time.RFC3339)), -1)
|
||||
sql = strings.Replace(sql, "$__timeTo()", fmt.Sprintf("'%s'", timeRange.GetToAsTimeUTC().Format(time.RFC3339)), -1)
|
||||
sql = strings.Replace(sql, "$__unixEpochFrom()", fmt.Sprintf("%d", timeRange.GetFromAsSecondsEpoch()), -1)
|
||||
sql = strings.Replace(sql, "$__unixEpochTo()", fmt.Sprintf("%d", timeRange.GetToAsSecondsEpoch()), -1)
|
||||
|
||||
|
||||
@@ -44,20 +44,6 @@ func TestSqlEngine(t *testing.T) {
|
||||
So(sql, ShouldEqual, "select 60000 ")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeFrom function", func() {
|
||||
sql, err := Interpolate(query, timeRange, "select $__timeFrom()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, fmt.Sprintf("select '%s'", from.Format(time.RFC3339)))
|
||||
})
|
||||
|
||||
Convey("interpolate __timeTo function", func() {
|
||||
sql, err := Interpolate(query, timeRange, "select $__timeTo()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, fmt.Sprintf("select '%s'", to.Format(time.RFC3339)))
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochFrom function", func() {
|
||||
sql, err := Interpolate(query, timeRange, "select $__unixEpochFrom()")
|
||||
So(err, ShouldBeNil)
|
||||
|
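Taken together, these hunks move $__timeFrom()/$__timeTo() formatting out of the shared sql_engine and into the per-dialect macro engines. A standalone TypeScript sketch (illustrative only; the real engines are the Go code above) of what each dialect now emits for the 2018-04-12 18:00-18:05 UTC range used in the tests:

    // Epoch seconds for the 2018-04-12T18:00:00Z .. 18:05:00Z test range.
    const from = Date.UTC(2018, 3, 12, 18, 0, 0) / 1000; // 1523556000
    const to = from + 300;                                // 1523556300

    // MySQL engine: epoch-based, sidestepping session time zone settings.
    const mysqlFrom = `FROM_UNIXTIME(${from})`;           // FROM_UNIXTIME(1523556000)

    // Postgres/MSSQL engines: RFC3339 string literals in UTC.
    const rfc3339 = (epoch: number) =>
      new Date(epoch * 1000).toISOString().replace('.000Z', 'Z');
    const postgresFrom = `'${rfc3339(from)}'`;            // '2018-04-12T18:00:00Z'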
@@ -16,7 +16,7 @@ export function registerAngularDirectives() {
react2AngularDirective('searchResult', SearchResult, []);
react2AngularDirective('tagFilter', TagFilter, [
'tags',
['onSelect', { watchDepth: 'reference' }],
['onChange', { watchDepth: 'reference' }],
['tagOptions', { watchDepth: 'reference' }],
]);
}

@@ -84,7 +84,7 @@ class AddPermissions extends Component<Props, NewDashboardAclItem> {
render() {
const { onCancel } = this.props;
const newItem = this.state;
const pickerClassName = 'width-20';
const pickerClassName = 'min-width-20';
const isValid = this.isValid();
return (
<div className="gf-form-inline cta-form">

@@ -40,7 +40,7 @@ export class UserPicker extends Component<Props, State> {
.then(result => {
return result.map(user => ({
id: user.userId,
label: `${user.login} - ${user.email}`,
label: user.login === user.email ? user.login : `${user.login} - ${user.email}`,
avatarUrl: user.avatarUrl,
login: user.login,
}));

@@ -10,7 +10,7 @@ import ResetStyles from 'app/core/components/Picker/ResetStyles';
export interface Props {
tags: string[];
tagOptions: () => any;
onSelect: (tag: string) => void;
onChange: (tags: string[]) => void;
}

export class TagFilter extends React.Component<Props, any> {
@@ -18,12 +18,9 @@ export class TagFilter extends React.Component<Props, any> {

constructor(props) {
super(props);

this.searchTags = this.searchTags.bind(this);
this.onChange = this.onChange.bind(this);
}

searchTags(query) {
onLoadOptions = query => {
return this.props.tagOptions().then(options => {
return options.map(option => ({
value: option.term,
@@ -31,18 +28,20 @@ export class TagFilter extends React.Component<Props, any> {
count: option.count,
}));
});
}
};

onChange(newTags) {
this.props.onSelect(newTags);
}
onChange = (newTags: any[]) => {
this.props.onChange(newTags.map(tag => tag.value));
};

render() {
const tags = this.props.tags.map(tag => ({ value: tag, label: tag, count: 0 }));

const selectOptions = {
classNamePrefix: 'gf-form-select-box',
isMulti: true,
defaultOptions: true,
loadOptions: this.searchTags,
loadOptions: this.onLoadOptions,
onChange: this.onChange,
className: 'gf-form-input gf-form-input--form-dropdown',
placeholder: 'Tags',
@@ -50,8 +49,12 @@ export class TagFilter extends React.Component<Props, any> {
noOptionsMessage: () => 'No tags found',
getOptionValue: i => i.value,
getOptionLabel: i => i.label,
value: this.props.tags,
value: tags,
styles: ResetStyles,
filterOption: (option, searchQuery) => {
const regex = RegExp(searchQuery, 'i');
return regex.test(option.value);
},
components: {
Option: TagOption,
IndicatorsContainer,
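The net effect of the TagFilter change is a fully controlled component: plain string tags come in through props, get wrapped as option objects for react-select, and are unwrapped again before onChange fires. A minimal sketch of that round trip (names local to the sketch, not Grafana APIs):

    type TagOption = { value: string; label: string; count: number };

    // props.tags (string[]) -> option objects for react-select's `value`
    const toOptions = (tags: string[]): TagOption[] =>
      tags.map(tag => ({ value: tag, label: tag, count: 0 }));

    // selected options -> plain strings handed to props.onChange
    const fromOptions = (selected: TagOption[]): string[] =>
      selected.map(option => option.value);

    fromOptions(toOptions(['prod', 'mysql'])); // ['prod', 'mysql']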
@@ -44,7 +44,7 @@ export class SeriesColorPicker extends React.Component<SeriesColorPickerProps> {
const drop = new Drop({
target: this.pickerElem,
content: dropContentElem,
position: 'top center',
position: 'bottom center',
classes: 'drop-popover',
openOn: 'hover',
hoverCloseDelay: 200,

@@ -41,7 +41,7 @@
</a>
</div>

<tag-filter tags="ctrl.query.tag" tagOptions="ctrl.getTags" onSelect="ctrl.onTagSelect">
<tag-filter tags="ctrl.query.tag" tagOptions="ctrl.getTags" onChange="ctrl.onTagFiltersChanged">
</tag-filter>
</div>

@@ -25,8 +25,6 @@ export class SearchCtrl {
appEvents.on('hide-dash-search', this.closeSearch.bind(this), $scope);

this.initialFolderFilterTitle = 'All';
this.getTags = this.getTags.bind(this);
this.onTagSelect = this.onTagSelect.bind(this);
this.isEditor = contextSrv.isEditor;
this.hasEditPermissionInFolders = contextSrv.hasEditPermissionInFolders;
}
@@ -162,7 +160,7 @@ export class SearchCtrl {
const localSearchId = this.currentSearchId;
const query = {
...this.query,
tag: this.query.tag.map(i => i.value),
tag: this.query.tag,
};

return this.searchSrv.search(query).then(results => {
@@ -195,14 +193,14 @@ export class SearchCtrl {
evt.preventDefault();
}

getTags() {
getTags = () => {
return this.searchSrv.getDashboardTags();
}
};

onTagSelect(newTags) {
this.query.tag = newTags;
onTagFiltersChanged = (tags: string[]) => {
this.query.tag = tags;
this.search();
}
};

clearSearchFilter() {
this.query.tag = [];

@@ -428,10 +428,16 @@ kbn.valueFormats.hex0x = (value, decimals) => {
};

kbn.valueFormats.sci = (value, decimals) => {
if (value == null) {
return '';
}
return value.toExponential(decimals);
};

kbn.valueFormats.locale = (value, decimals) => {
if (value == null) {
return '';
}
return value.toLocaleString(undefined, { maximumFractionDigits: decimals });
};

@@ -584,8 +590,8 @@ kbn.valueFormats.flowcms = kbn.formatBuilders.fixedUnit('cms');
kbn.valueFormats.flowcfs = kbn.formatBuilders.fixedUnit('cfs');
kbn.valueFormats.flowcfm = kbn.formatBuilders.fixedUnit('cfm');
kbn.valueFormats.litreh = kbn.formatBuilders.fixedUnit('l/h');
kbn.valueFormats.flowlpm = kbn.formatBuilders.decimalSIPrefix('L');
kbn.valueFormats.flowmlpm = kbn.formatBuilders.decimalSIPrefix('L', -1);
kbn.valueFormats.flowlpm = kbn.formatBuilders.fixedUnit('l/min');
kbn.valueFormats.flowmlpm = kbn.formatBuilders.fixedUnit('mL/min');

// Angle
kbn.valueFormats.degree = kbn.formatBuilders.fixedUnit('°');
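A quick standalone sketch of the null guard added to the sci and locale formats above: a missing value now renders as an empty string instead of throwing.

    const sci = (value: number | null, decimals: number): string =>
      value == null ? '' : value.toExponential(decimals);

    sci(98765, 2); // '9.88e+4'
    sci(null, 2);  // '' (previously threw on null input)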
@@ -64,9 +64,9 @@
</div>
<div class="gf-form">
<metric-segment-model property="conditionModel.evaluator.type" options="ctrl.evalFunctions" custom="false" css-class="query-keyword" on-change="ctrl.evaluatorTypeChanged(conditionModel.evaluator)"></metric-segment-model>
<input class="gf-form-input max-width-9" type="number" step="any" ng-hide="conditionModel.evaluator.params.length === 0" ng-model="conditionModel.evaluator.params[0]" ng-change="ctrl.evaluatorParamsChanged()"></input>
<label class="gf-form-label query-keyword" ng-show="conditionModel.evaluator.params.length === 2">TO</label>
<input class="gf-form-input max-width-9" type="number" step="any" ng-if="conditionModel.evaluator.params.length === 2" ng-model="conditionModel.evaluator.params[1]" ng-change="ctrl.evaluatorParamsChanged()"></input>
<input class="gf-form-input max-width-9" type="number" step="any" ng-hide="conditionModel.evaluator.params.length === 0" ng-model="conditionModel.evaluator.params[0]" ng-change="ctrl.evaluatorParamsChanged()">
<label class="gf-form-label query-keyword" ng-show="conditionModel.evaluator.params.length === 2">TO</label>
<input class="gf-form-input max-width-9" type="number" step="any" ng-if="conditionModel.evaluator.params.length === 2" ng-model="conditionModel.evaluator.params[1]" ng-change="ctrl.evaluatorParamsChanged()">
</div>
<div class="gf-form">
<label class="gf-form-label">

@@ -8,9 +8,9 @@ const alertQueryDef = new QueryPartDef({
{
name: 'from',
type: 'string',
options: ['1s', '10s', '1m', '5m', '10m', '15m', '1h', '24h', '48h'],
options: ['10s', '1m', '5m', '10m', '15m', '1h', '24h', '48h'],
},
{ name: 'to', type: 'string', options: ['now'] },
{ name: 'to', type: 'string', options: ['now', 'now-1m', 'now-5m', 'now-10m', 'now-1h'] },
],
defaultParams: ['#A', '15m', 'now', 'avg'],
});

@@ -223,6 +223,8 @@ export class DashboardModel {
}

panelInitialized(panel: PanelModel) {
panel.initialized();

if (!this.otherPanelInFullscreen(panel)) {
panel.refresh();
}

@@ -6,6 +6,7 @@ function dashLinksContainer() {
return {
scope: {
links: '=',
dashboard: '=',
},
restrict: 'E',
controller: 'DashLinksContainerCtrl',
@@ -20,6 +21,8 @@ function dashLink($compile, $sanitize, linkSrv) {
restrict: 'E',
link: (scope, elem) => {
const link = scope.link;
const dashboard = scope.dashboard;

let template =
'<div class="gf-form">' +
'<a class="pointer gf-form-label" data-placement="bottom"' +
@@ -76,7 +79,7 @@ function dashLink($compile, $sanitize, linkSrv) {
}

update();
scope.$on('refresh', update);
dashboard.events.on('refresh', update, scope);
},
};
}

@@ -132,7 +132,7 @@ export class PanelModel {
}
}

panelInitialized() {
initialized() {
this.events.emit('panel-initialized');
}

@@ -20,7 +20,7 @@
</div>

<div ng-if="ctrl.dashboard.links.length > 0" >
<dash-links-container links="ctrl.dashboard.links" class="gf-form-inline"></dash-links-container>
<dash-links-container links="ctrl.dashboard.links" dashboard="ctrl.dashboard" class="gf-form-inline"></dash-links-container>
</div>

<div class="clearfix"></div>

@@ -148,7 +148,7 @@ export function loadDataSourceTypes(): ThunkResult<void> {
export function nameExits(dataSources, name) {
return (
dataSources.filter(dataSource => {
return dataSource.name === name;
return dataSource.name.toLowerCase() === name.toLowerCase();
}).length > 0
);
}
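In other words, nameExits now treats data source names case-insensitively. A condensed sketch of the same check (using some() instead of filter().length, purely for brevity):

    const nameExits = (dataSources: Array<{ name: string }>, name: string): boolean =>
      dataSources.some(ds => ds.name.toLowerCase() === name.toLowerCase());

    nameExits([{ name: 'Prometheus' }], 'prometheus'); // true after this change, false before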
@@ -1,5 +1,4 @@
<div class="panel panel--solo" ng-if="panel" style="width: 100%">
<div class="panel-solo" ng-if="panel">
<plugin-component type="panel">
</plugin-component>
</div>
<div class="clearfix"></div>

@@ -115,7 +115,7 @@ export class TeamMembers extends PureComponent<Props, State> {
</button>
<h5>Add Team Member</h5>
<div className="gf-form-inline">
<UserPicker onSelected={this.onUserSelected} className="width-30" />
<UserPicker onSelected={this.onUserSelected} className="min-width-30" />
{this.state.newTeamMember && (
<button className="btn btn-success gf-form-btn" type="submit" onClick={this.onAddUserToTeam}>
Add to team

@@ -58,7 +58,7 @@ exports[`Render should render component 1`] = `
className="gf-form-inline"
>
<UserPicker
className="width-30"
className="min-width-30"
onSelected={[Function]}
/>
</div>
@@ -152,7 +152,7 @@ exports[`Render should render team members 1`] = `
className="gf-form-inline"
>
<UserPicker
className="width-30"
className="min-width-30"
onSelected={[Function]}
/>
</div>
@@ -372,7 +372,7 @@ exports[`Render should render team members when sync enabled 1`] = `
className="gf-form-inline"
>
<UserPicker
className="width-30"
className="min-width-30"
onSelected={[Function]}
/>
</div>

@@ -2,22 +2,8 @@ import coreModule from 'app/core/core_module';
import _ from 'lodash';
import * as queryDef from './query_def';

export function elasticBucketAgg() {
return {
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/bucket_agg.html',
controller: 'ElasticBucketAggCtrl',
restrict: 'E',
scope: {
target: '=',
index: '=',
onChange: '&',
getFields: '&',
},
};
}

export class ElasticBucketAggCtrl {
/** @nginject */
/** @ngInject */
constructor($scope, uiSegmentSrv, $q, $rootScope) {
const bucketAggs = $scope.target.bucketAggs;

@@ -226,5 +212,18 @@ export class ElasticBucketAggCtrl {
}
}

export function elasticBucketAgg() {
return {
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/bucket_agg.html',
controller: ElasticBucketAggCtrl,
restrict: 'E',
scope: {
target: '=',
index: '=',
onChange: '&',
getFields: '&',
},
};
}

coreModule.directive('elasticBucketAgg', elasticBucketAgg);
coreModule.controller('ElasticBucketAggCtrl', ElasticBucketAggCtrl);

@@ -2,22 +2,8 @@ import coreModule from 'app/core/core_module';
import _ from 'lodash';
import * as queryDef from './query_def';

export function elasticMetricAgg() {
return {
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/metric_agg.html',
controller: 'ElasticMetricAggCtrl',
restrict: 'E',
scope: {
target: '=',
index: '=',
onChange: '&',
getFields: '&',
esVersion: '=',
},
};
}

export class ElasticMetricAggCtrl {
/** @ngInject */
constructor($scope, uiSegmentSrv, $q, $rootScope) {
const metricAggs = $scope.target.metrics;
$scope.metricAggTypes = queryDef.getMetricAggTypes($scope.esVersion);
@@ -209,5 +195,19 @@ export class ElasticMetricAggCtrl {
}
}

export function elasticMetricAgg() {
return {
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/metric_agg.html',
controller: ElasticMetricAggCtrl,
restrict: 'E',
scope: {
target: '=',
index: '=',
onChange: '&',
getFields: '&',
esVersion: '=',
},
};
}

coreModule.directive('elasticMetricAgg', elasticMetricAgg);
coreModule.controller('ElasticMetricAggCtrl', ElasticMetricAggCtrl);

@@ -28,12 +28,12 @@ An annotation is an event that is overlaid on top of graphs. The query can have
Macros:
- $__time(column) -> UNIX_TIMESTAMP(column) as time (or as time_sec)
- $__timeEpoch(column) -> UNIX_TIMESTAMP(column) as time (or as time_sec)
- $__timeFilter(column) -> column BETWEEN '2017-04-21T05:01:17Z' AND '2017-04-21T05:01:17Z'
- $__timeFilter(column) -> column BETWEEN FROM_UNIXTIME(1492750877) AND FROM_UNIXTIME(1492750877)
- $__unixEpochFilter(column) -> time_unix_epoch > 1492750877 AND time_unix_epoch < 1492750877

Or build your own conditionals using these macros which just return the values:
- $__timeFrom() -> '2017-04-21T05:01:17Z'
- $__timeTo() -> '2017-04-21T05:01:17Z'
- $__timeFrom() -> FROM_UNIXTIME(1492750877)
- $__timeTo() -> FROM_UNIXTIME(1492750877)
- $__unixEpochFrom() -> 1492750877
- $__unixEpochTo() -> 1492750877
</pre>

@@ -151,7 +151,7 @@ Table:
Macros:
- $__time(column) -> UNIX_TIMESTAMP(column) as time_sec
- $__timeEpoch(column) -> UNIX_TIMESTAMP(column) as time_sec
- $__timeFilter(column) -> column BETWEEN '2017-04-21T05:01:17Z' AND '2017-04-21T05:01:17Z'
- $__timeFilter(column) -> column BETWEEN FROM_UNIXTIME(1492750877) AND FROM_UNIXTIME(1492750877)
- $__unixEpochFilter(column) -> time_unix_epoch > 1492750877 AND time_unix_epoch < 1492750877
- $__timeGroup(column,'5m'[, fillvalue]) -> cast(cast(UNIX_TIMESTAMP(column)/(300) as signed)*300 as signed)
by setting fillvalue grafana will fill in missing values according to the interval
@@ -169,8 +169,8 @@ GROUP BY 1
ORDER BY 1

Or build your own conditionals using these macros which just return the values:
- $__timeFrom() -> '2017-04-21T05:01:17Z'
- $__timeTo() -> '2017-04-21T05:01:17Z'
- $__timeFrom() -> FROM_UNIXTIME(1492750877)
- $__timeTo() -> FROM_UNIXTIME(1492750877)
- $__unixEpochFrom() -> 1492750877
- $__unixEpochTo() -> 1492750877
</pre>

@@ -151,8 +151,7 @@ table_schema IN (

buildDatatypeQuery(column: string) {
let query = 'SELECT udt_name FROM information_schema.columns WHERE ';
query += this.buildSchemaConstraint();
query += ' AND table_name = ' + this.quoteIdentAsLiteral(this.target.table);
query += this.buildTableConstraint(this.target.table);
query += ' AND column_name = ' + this.quoteIdentAsLiteral(column);
return query;
}
@@ -58,15 +58,7 @@ class GraphElement {

// panel events
this.ctrl.events.on('panel-teardown', this.onPanelTeardown.bind(this));

/**
* Split graph rendering into two parts.
* First, calculate series stats in buildFlotPairs() function. Then legend rendering started
* (see ctrl.events.on('render') in legend.ts).
* When legend is rendered it emits 'legend-rendering-complete' and graph rendered.
*/
this.ctrl.events.on('render', this.onRender.bind(this));
this.ctrl.events.on('legend-rendering-complete', this.onLegendRenderingComplete.bind(this));

// global events
appEvents.on('graph-hover', this.onGraphHover.bind(this), scope);
@@ -85,11 +77,20 @@ class GraphElement {
if (!this.data) {
return;
}

this.annotations = this.ctrl.annotations || [];
this.buildFlotPairs(this.data);
const graphHeight = this.elem.height();
updateLegendValues(this.data, this.panel, graphHeight);

if (!this.panel.legend.show) {
if (this.legendElem.hasChildNodes()) {
ReactDOM.unmountComponentAtNode(this.legendElem);
}
this.renderPanel();
return;
}

const { values, min, max, avg, current, total } = this.panel.legend;
const { alignAsTable, rightSide, sideWidth, sort, sortDesc, hideEmpty, hideZero } = this.panel.legend;
const legendOptions = { alignAsTable, rightSide, sideWidth, sort, sortDesc, hideEmpty, hideZero };
@@ -104,12 +105,9 @@ class GraphElement {
onColorChange: this.ctrl.onColorChange,
onToggleAxis: this.ctrl.onToggleAxis,
};
const legendReactElem = React.createElement(Legend, legendProps);
ReactDOM.render(legendReactElem, this.legendElem, () => this.onLegendRenderingComplete());
}

onLegendRenderingComplete() {
this.render_panel();
const legendReactElem = React.createElement(Legend, legendProps);
ReactDOM.render(legendReactElem, this.legendElem, () => this.renderPanel());
}

onGraphHover(evt) {
@@ -281,7 +279,7 @@ class GraphElement {
}

// Function for rendering panel
render_panel() {
renderPanel() {
this.panelWidth = this.elem.width();
if (this.shouldAbortRender()) {
return;

@@ -125,7 +125,7 @@ describe('grafanaGraph', () => {

//Emulate functions called by event listeners
link.buildFlotPairs(link.data);
link.render_panel();
link.renderPanel();
ctx.plotData = ctrl.plot.mock.calls[0][1];

ctx.plotOptions = ctrl.plot.mock.calls[0][2];

@@ -130,6 +130,33 @@ describe('TimeRegionManager', () => {
});
});

plotOptionsScenario('for time from/to region', ctx => {
const regions = [{ from: '00:00', to: '05:00', fill: true, colorMode: 'red' }];
const from = moment('2018-12-01T00:00+01:00');
const to = moment('2018-12-03T23:59+01:00');
ctx.setup(regions, from, to);

it('should add 3 markings', () => {
expect(ctx.options.grid.markings.length).toBe(3);
});

it('should add one fill between 00:00 and 05:00 each day', () => {
const markings = ctx.options.grid.markings;

expect(moment(markings[0].xaxis.from).format()).toBe(moment('2018-12-01T01:00:00+01:00').format());
expect(moment(markings[0].xaxis.to).format()).toBe(moment('2018-12-01T06:00:00+01:00').format());
expect(markings[0].color).toBe(colorModes.red.color.fill);

expect(moment(markings[1].xaxis.from).format()).toBe(moment('2018-12-02T01:00:00+01:00').format());
expect(moment(markings[1].xaxis.to).format()).toBe(moment('2018-12-02T06:00:00+01:00').format());
expect(markings[1].color).toBe(colorModes.red.color.fill);

expect(moment(markings[2].xaxis.from).format()).toBe(moment('2018-12-03T01:00:00+01:00').format());
expect(moment(markings[2].xaxis.to).format()).toBe(moment('2018-12-03T06:00:00+01:00').format());
expect(markings[2].color).toBe(colorModes.red.color.fill);
});
});

plotOptionsScenario('for day of week from/to region', ctx => {
const regions = [{ fromDayOfWeek: 7, toDayOfWeek: 7, fill: true, colorMode: 'red' }];
const from = moment('2018-01-01T18:45:05+01:00');
@@ -211,6 +238,42 @@ describe('TimeRegionManager', () => {
});
});

plotOptionsScenario('for day of week from/to time region', ctx => {
const regions = [{ fromDayOfWeek: 7, from: '23:00', toDayOfWeek: 1, to: '01:40', fill: true, colorMode: 'red' }];
const from = moment('2018-12-07T12:51:19+01:00');
const to = moment('2018-12-10T13:51:29+01:00');
ctx.setup(regions, from, to);

it('should add 1 marking', () => {
expect(ctx.options.grid.markings.length).toBe(1);
});

it('should add one fill between sunday 23:00 and monday 01:40', () => {
const markings = ctx.options.grid.markings;

expect(moment(markings[0].xaxis.from).format()).toBe(moment('2018-12-10T00:00:00+01:00').format());
expect(moment(markings[0].xaxis.to).format()).toBe(moment('2018-12-10T02:40:00+01:00').format());
});
});

plotOptionsScenario('for day of week from/to time region', ctx => {
const regions = [{ fromDayOfWeek: 6, from: '03:00', toDayOfWeek: 7, to: '02:00', fill: true, colorMode: 'red' }];
const from = moment('2018-12-07T12:51:19+01:00');
const to = moment('2018-12-10T13:51:29+01:00');
ctx.setup(regions, from, to);

it('should add 1 marking', () => {
expect(ctx.options.grid.markings.length).toBe(1);
});

it('should add one fill between saturday 03:00 and sunday 02:00', () => {
const markings = ctx.options.grid.markings;

expect(moment(markings[0].xaxis.from).format()).toBe(moment('2018-12-08T04:00:00+01:00').format());
expect(moment(markings[0].xaxis.to).format()).toBe(moment('2018-12-09T03:00:00+01:00').format());
});
});

plotOptionsScenario('for day of week from/to time region with daylight saving time', ctx => {
const regions = [{ fromDayOfWeek: 7, from: '20:00', toDayOfWeek: 7, to: '23:00', fill: true, colorMode: 'red' }];
const from = moment('2018-03-17T06:00:00+01:00');

@@ -87,6 +87,14 @@ export class TimeRegionManager {
continue;
}

if (timeRegion.from && !timeRegion.to) {
timeRegion.to = timeRegion.from;
}

if (!timeRegion.from && timeRegion.to) {
timeRegion.from = timeRegion.to;
}

hRange = {
from: this.parseTimeRange(timeRegion.from),
to: this.parseTimeRange(timeRegion.to),
@@ -108,21 +116,13 @@ export class TimeRegionManager {
hRange.to.dayOfWeek = Number(timeRegion.toDayOfWeek);
}

if (!hRange.from.h && hRange.to.h) {
hRange.from = hRange.to;
}

if (hRange.from.h && !hRange.to.h) {
hRange.to = hRange.from;
}

if (hRange.from.dayOfWeek && !hRange.from.h && !hRange.from.m) {
if (hRange.from.dayOfWeek && hRange.from.h === null && hRange.from.m === null) {
hRange.from.h = 0;
hRange.from.m = 0;
hRange.from.s = 0;
}

if (hRange.to.dayOfWeek && !hRange.to.h && !hRange.to.m) {
if (hRange.to.dayOfWeek && hRange.to.h === null && hRange.to.m === null) {
hRange.to.h = 23;
hRange.to.m = 59;
hRange.to.s = 59;
@@ -169,8 +169,16 @@ export class TimeRegionManager {
fromEnd.add(hRange.to.h - hRange.from.h, 'hours');
} else if (hRange.from.h + hRange.to.h < 23) {
fromEnd.add(hRange.to.h, 'hours');

while (fromEnd.hour() !== hRange.to.h) {
fromEnd.add(-1, 'hours');
}
} else {
fromEnd.add(24 - hRange.from.h, 'hours');

while (fromEnd.hour() !== hRange.to.h) {
fromEnd.add(1, 'hours');
}
}

fromEnd.set('minute', hRange.to.m);
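The switch from truthiness to explicit null checks in TimeRegionManager matters because midnight parses to hour 0, which is falsy. A two-line sketch of the bug the hunk above fixes:

    const h: number | null = 0;      // '00:00' parses to hour 0
    const missingBefore = !h;        // true  -> midnight wrongly treated as "no time set"
    const missingAfter = h === null; // false -> '00:00' is now respected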
@@ -107,7 +107,10 @@ class SingleStatCtrl extends MetricsPanelCtrl {
}

onDataReceived(dataList) {
const data: any = {};
const data: any = {
scopedVars: _.extend({}, this.panel.scopedVars),
};

if (dataList.length > 0 && dataList[0].type === 'table') {
this.dataType = 'table';
const tableData = dataList.map(this.tableHandler.bind(this));
@@ -117,6 +120,7 @@ class SingleStatCtrl extends MetricsPanelCtrl {
this.series = dataList.map(this.seriesHandler.bind(this));
this.setValues(data);
}

this.data = data;
this.render();
}
@@ -320,7 +324,6 @@ class SingleStatCtrl extends MetricsPanelCtrl {
}

// Add $__name variable for using in prefix or postfix
data.scopedVars = _.extend({}, this.panel.scopedVars);
data.scopedVars['__name'] = { value: this.series[0].label };
}
this.setValueMapping(data);

@@ -199,7 +199,6 @@ small,

mark,
.mark {
padding: 0.2em;
background: $alert-warning-bg;
}

@@ -19,16 +19,23 @@ div.flot-text {

.panel {
height: 100%;
}

&--solo {
position: fixed;
bottom: 0;
right: 0;
margin: 0;
.panel-container {
border: none;
z-index: $zindex-sidemenu + 1;
}
.panel-solo {
position: fixed;
bottom: 0;
right: 0;
margin: 0;
left: 0;
top: 0;

.panel-container {
border: none;
}

.panel-menu-toggle,
.panel-menu {
display: none;
}
}

@@ -19,6 +19,12 @@
}
}

@for $i from 1 through 30 {
.min-width-#{$i} {
min-width: ($spacer * $i) - $gf-form-margin !important;
}
}

@for $i from 1 through 30 {
.offset-width-#{$i} {
margin-left: ($spacer * $i) !important;
@@ -35,6 +35,8 @@ go run build.go -goarch arm64 -cc ${CCARM64} ${OPT} build
|
||||
go run build.go -goos darwin -cc ${CCOSX64} ${OPT} build
|
||||
|
||||
go run build.go -goos windows -cc ${CCWIN64} ${OPT} build
|
||||
|
||||
# Do not remove CC from the linux build, its there for compatibility with Centos6
|
||||
CC=${CCX64} go run build.go ${OPT} build
|
||||
|
||||
yarn install --pure-lockfile --no-progress
|
||||
@@ -57,7 +59,7 @@ go run build.go ${OPT} build-frontend
|
||||
source /etc/profile.d/rvm.sh
|
||||
|
||||
echo "Packaging"
|
||||
go run build.go -goos linux -pkg-arch amd64 ${OPT} package-only latest
|
||||
go run build.go -goos linux -pkg-arch amd64 ${OPT} package-only
|
||||
#removing amd64 phantomjs bin for armv7/arm64 packages
|
||||
rm tools/phantomjs/phantomjs
|
||||
go run build.go -goos linux -pkg-arch armv7 ${OPT} package-only
|
||||
@@ -78,3 +80,4 @@ else
|
||||
fi
|
||||
go run build.go -goos windows -pkg-arch amd64 ${OPT} package-only
|
||||
|
||||
go run build.go latest
|
||||
@@ -8,6 +8,8 @@ set -e
|
||||
|
||||
EXTRA_OPTS="$@"
|
||||
|
||||
CCARMV7=arm-linux-gnueabihf-gcc
|
||||
CCARM64=aarch64-linux-gnu-gcc
|
||||
CCX64=/tmp/x86_64-centos6-linux-gnu/bin/x86_64-centos6-linux-gnu-gcc
|
||||
|
||||
GOPATH=/go
|
||||
@@ -26,6 +28,9 @@ fi
|
||||
|
||||
echo "Build arguments: $OPT"
|
||||
|
||||
go run build.go -goarch armv7 -cc ${CCARMV7} ${OPT} build
|
||||
go run build.go -goarch arm64 -cc ${CCARM64} ${OPT} build
|
||||
|
||||
CC=${CCX64} go run build.go ${OPT} build
|
||||
|
||||
yarn install --pure-lockfile --no-progress
|
||||
@@ -43,4 +48,8 @@ go run build.go ${OPT} build-frontend
|
||||
source /etc/profile.d/rvm.sh
|
||||
|
||||
echo "Packaging"
|
||||
go run build.go -goos linux -pkg-arch amd64 ${OPT} package-only latest
|
||||
go run build.go -goos linux -pkg-arch amd64 ${OPT} package-only
|
||||
go run build.go -goos linux -pkg-arch armv7 ${OPT} package-only
|
||||
go run build.go -goos linux -pkg-arch arm64 ${OPT} package-only
|
||||
|
||||
go run build.go latest
|
||||
|
||||
@@ -1,5 +1,25 @@
|
||||
FROM circleci/golang:1.11
|
||||
|
||||
RUN git clone https://github.com/aptly-dev/aptly $GOPATH/src/github.com/aptly-dev/aptly && \
|
||||
cd $GOPATH/src/github.com/aptly-dev/aptly && \
|
||||
# pin aptly to a specific commit after 1.3.0 that contains gpg2 support
|
||||
git reset --hard a64807efdaf5e380bfa878c71bc88eae10d62be1 && \
|
||||
make install
|
||||
|
||||
FROM circleci/python:2.7-stretch
|
||||
|
||||
RUN sudo pip install awscli && \
|
||||
curl https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-222.0.0-linux-x86_64.tar.gz | \
|
||||
sudo tar xvzf - -C /opt
|
||||
ENV PATH=$PATH:/opt/google-cloud-sdk/bin
|
||||
|
||||
USER root
|
||||
|
||||
RUN pip install awscli && \
|
||||
curl https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-222.0.0-linux-x86_64.tar.gz | \
|
||||
tar xvzf - -C /opt && \
|
||||
apt update && \
|
||||
apt install -y createrepo expect && \
|
||||
apt-get autoremove -y && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=0 /go/bin/aptly /usr/local/bin/aptly
|
||||
|
||||
USER circleci
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
_version="1.0.0"
|
||||
_version="1.1.0"
|
||||
_tag="grafana/grafana-ci-deploy:${_version}"
|
||||
|
||||
docker build -t $_tag .
|
||||
|
||||
7
scripts/build/load-signing-key.sh
Normal file
7
scripts/build/load-signing-key.sh
Normal file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
git clone git@github.com:torkelo/private.git ~/private-repo
|
||||
gpg --batch --allow-secret-key-import --import ~/private-repo/signing/private.key
|
||||
pkill gpg-agent
|
||||
@@ -1,6 +1,15 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd ..
|
||||
git clone -b master --single-branch git@github.com:grafana/grafana-enterprise.git --depth 1
|
||||
|
||||
|
||||
if [ -z "$CIRCLE_TAG" ]; then
|
||||
_target="master"
|
||||
else
|
||||
_target="$CIRCLE_TAG"
|
||||
fi
|
||||
|
||||
git clone -b "$_target" --single-branch git@github.com:grafana/grafana-enterprise.git --depth 1
|
||||
|
||||
cd grafana-enterprise
|
||||
./build.sh
|
||||
|
||||
@@ -4,10 +4,10 @@
|
||||
|
||||
EXTRA_OPTS="$@"
|
||||
|
||||
# Right now we hack this in into the publish script.
|
||||
# Right now we hack this in into the publish script.
|
||||
# Eventually we might want to keep a list of all previous releases somewhere.
|
||||
_releaseNoteUrl="https://community.grafana.com/t/release-notes-v5-3-x/10244"
|
||||
_whatsNewUrl="http://docs.grafana.org/guides/whats-new-in-v5-3/"
|
||||
_releaseNoteUrl="https://community.grafana.com/t/release-notes-v5-4-x/12215"
|
||||
_whatsNewUrl="http://docs.grafana.org/guides/whats-new-in-v5-4/"
|
||||
|
||||
./scripts/build/release_publisher/release_publisher \
|
||||
--wn ${_whatsNewUrl} \
|
||||
|
||||
@@ -105,6 +105,6 @@ func TestFileWalker(t *testing.T) {
|
||||
incorrectPackageName := "grafana_5.2.0-474pre1_armfoo.deb"
|
||||
_, err := mapPackage(incorrectPackageName, incorrectPackageName, []byte{})
|
||||
if err == nil {
|
||||
t.Errorf("Testing (%v), expected to fail due to an unrecognized arch, but signalled no error.", incorrectPackageName)
|
||||
t.Errorf("Testing (%v), expected to fail due to an unrecognized arch, but signaled no error.", incorrectPackageName)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,12 +41,12 @@ func main() {
|
||||
var builder releaseBuilder
|
||||
var product string
|
||||
|
||||
archiveProviderRoot := "https://s3-us-west-2.amazonaws.com"
|
||||
archiveProviderRoot := "https://dl.grafana.com"
|
||||
buildArtifacts := completeBuildArtifactConfigurations
|
||||
|
||||
if enterprise {
|
||||
product = "grafana-enterprise"
|
||||
baseUrl = createBaseUrl(archiveProviderRoot, "grafana-enterprise-releases", product, nightly)
|
||||
baseUrl = createBaseUrl(archiveProviderRoot, "enterprise", product, nightly)
|
||||
var err error
|
||||
buildArtifacts, err = filterBuildArtifacts([]artifactFilter{
|
||||
{os: "deb", arch: "amd64"},
|
||||
@@ -61,7 +61,7 @@ func main() {
|
||||
|
||||
} else {
|
||||
product = "grafana"
|
||||
baseUrl = createBaseUrl(archiveProviderRoot, "grafana-releases", product, nightly)
|
||||
baseUrl = createBaseUrl(archiveProviderRoot, "oss", product, nightly)
|
||||
}
|
||||
|
||||
if fromLocal {
|
||||
|
||||
27
scripts/build/update_repo/aptly.conf
Normal file
27
scripts/build/update_repo/aptly.conf
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"rootDir": "/deb-repo/db",
|
||||
"downloadConcurrency": 4,
|
||||
"downloadSpeedLimit": 0,
|
||||
"architectures": [],
|
||||
"dependencyFollowSuggests": false,
|
||||
"dependencyFollowRecommends": false,
|
||||
"dependencyFollowAllVariants": false,
|
||||
"dependencyFollowSource": false,
|
||||
"dependencyVerboseResolve": false,
|
||||
"gpgDisableSign": false,
|
||||
"gpgDisableVerify": false,
|
||||
"gpgProvider": "gpg2",
|
||||
"downloadSourcePackages": false,
|
||||
"skipLegacyPool": true,
|
||||
"ppaDistributorID": "ubuntu",
|
||||
"ppaCodename": "",
|
||||
"skipContentsPublishing": false,
|
||||
"FileSystemPublishEndpoints": {
|
||||
"repo": {
|
||||
"rootDir": "/deb-repo/repo",
|
||||
"linkMethod": "copy"
|
||||
}
|
||||
},
|
||||
"S3PublishEndpoints": {},
|
||||
"SwiftPublishEndpoints": {}
|
||||
}
|
||||
7
scripts/build/update_repo/sign-rpm-repo.sh
Executable file
7
scripts/build/update_repo/sign-rpm-repo.sh
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env expect
|
||||
|
||||
set password [lindex $argv 0]
|
||||
spawn gpg --detach-sign --armor /rpm-repo/repodata/repomd.xml
|
||||
expect "Enter passphrase: "
|
||||
send -- "$password\r"
|
||||
expect eof
|
||||
7
scripts/build/update_repo/unlock-gpg-key.sh
Executable file
7
scripts/build/update_repo/unlock-gpg-key.sh
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env expect
|
||||
|
||||
set password [lindex $argv 0]
|
||||
spawn gpg --detach-sign --armor /tmp/sign-this
|
||||
expect "Enter passphrase: "
|
||||
send -- "$password\r"
|
||||
expect eof
|
||||
58
scripts/build/update_repo/update-deb.sh
Executable file
58
scripts/build/update_repo/update-deb.sh
Executable file
@@ -0,0 +1,58 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
RELEASE_TYPE="${1:-}"
|
||||
GPG_PASS="${2:-}"
|
||||
RELEASE_TAG="${3:-}"
|
||||
REPO="grafana"
|
||||
|
||||
if [ -z "$RELEASE_TYPE" -o -z "$GPG_PASS" ]; then
|
||||
echo "Both RELEASE_TYPE (arg 1) and GPG_PASS (arg 2) has to be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$RELEASE_TYPE" != "oss" && "$RELEASE_TYPE" != "enterprise" ]]; then
|
||||
echo "RELEASE_TYPE (arg 1) must be either oss or enterprise."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if echo "$RELEASE_TAG" | grep -q "beta"; then
|
||||
REPO="beta"
|
||||
fi
|
||||
|
||||
set -e
|
||||
|
||||
# Setup environment
|
||||
cp scripts/build/update_repo/aptly.conf /etc/aptly.conf
|
||||
mkdir -p /deb-repo/db \
|
||||
/deb-repo/repo \
|
||||
/deb-repo/tmp
|
||||
|
||||
# Download the database
|
||||
gsutil -m rsync -r "gs://grafana-aptly-db/$RELEASE_TYPE" /deb-repo/db
|
||||
|
||||
# Add the new release to the repo
|
||||
aptly publish drop grafana filesystem:repo:grafana || true
|
||||
aptly publish drop beta filesystem:repo:grafana || true
|
||||
cp ./dist/*.deb /deb-repo/tmp
|
||||
rm /deb-repo/tmp/grafana_latest*.deb || true
|
||||
aptly repo add "$REPO" ./dist
|
||||
|
||||
# Setup signing and sign the repo
|
||||
|
||||
echo "allow-loopback-pinentry" > ~/.gnupg/gpg-agent.conf
|
||||
echo "pinentry-mode loopback" > ~/.gnupg/gpg.conf
|
||||
|
||||
touch /tmp/sign-this
|
||||
./scripts/build/update_repo/unlock-gpg-key.sh "$GPG_PASS"
|
||||
rm /tmp/sign-this /tmp/sign-this.asc
|
||||
|
||||
aptly publish repo grafana filesystem:repo:grafana
|
||||
aptly publish repo beta filesystem:repo:grafana
|
||||
|
||||
# Update the repo and db on gcp
|
||||
gsutil -m rsync -r -d /deb-repo/db "gs://grafana-aptly-db/$RELEASE_TYPE"
|
||||
gsutil -m rsync -r -d /deb-repo/repo/grafana "gs://grafana-repo/$RELEASE_TYPE/deb"
|
||||
|
||||
# usage:
|
||||
#
|
||||
# deb https://packages.grafana.com/oss/deb stable main
|
||||
59
scripts/build/update_repo/update-rpm.sh
Executable file
59
scripts/build/update_repo/update-rpm.sh
Executable file
@@ -0,0 +1,59 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
RELEASE_TYPE="${1:-}"
|
||||
GPG_PASS="${2:-}"
|
||||
|
||||
RELEASE_TAG="${3:-}"
|
||||
REPO="rpm"
|
||||
|
||||
if [ -z "$RELEASE_TYPE" -o -z "$GPG_PASS" ]; then
|
||||
echo "Both RELEASE_TYPE (arg 1) and GPG_PASS (arg 2) has to be set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$RELEASE_TYPE" != "oss" && "$RELEASE_TYPE" != "enterprise" ]]; then
|
||||
echo "RELEASE_TYPE (arg 1) must be either oss or enterprise."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if echo "$RELEASE_TAG" | grep -q "beta"; then
|
||||
REPO="rpm-beta"
|
||||
fi
|
||||
|
||||
set -e
|
||||
|
||||
# Setup environment
|
||||
BUCKET="gs://grafana-repo/$RELEASE_TYPE/$REPO"
|
||||
mkdir -p /rpm-repo
|
||||
|
||||
# Download the database
|
||||
gsutil -m rsync -r "$BUCKET" /rpm-repo
|
||||
|
||||
# Add the new release to the repo
|
||||
cp ./dist/*.rpm /rpm-repo
|
||||
rm /rpm-repo/grafana-latest-1*.rpm || true
|
||||
cd /rpm-repo
|
||||
createrepo .
|
||||
|
||||
# Setup signing and sign the repo
|
||||
|
||||
echo "allow-loopback-pinentry" > ~/.gnupg/gpg-agent.conf
|
||||
echo "pinentry-mode loopback" > ~/.gnupg/gpg.conf
|
||||
|
||||
rm /rpm-repo/repodata/repomd.xml.asc || true
|
||||
pkill gpg-agent || true
|
||||
./scripts/build/update_repo/sign-rpm-repo.sh "$GPG_PASS"
|
||||
|
||||
# Update the repo and db on gcp
|
||||
gsutil -m rsync -r -d /rpm-repo "$BUCKET"
|
||||
|
||||
# usage:
|
||||
# [grafana]
|
||||
# name=grafana
|
||||
# baseurl=https://packages.grafana.com/oss/rpm
|
||||
# repo_gpgcheck=1
|
||||
# enabled=1
|
||||
# gpgcheck=1
|
||||
# gpgkey=https://packages.grafana.com/gpg.key
|
||||
# sslverify=1
|
||||
# sslcacert=/etc/pki/tls/certs/ca-bundle.crt
|
||||