Compare commits

...

60 Commits

Author SHA1 Message Date
Leonard Gram
653918056c Merge pull request #14870 from grafana/fix-build
build: typo in build config.
2019-01-14 15:26:26 +01:00
Leonard Gram
194153aa62 build: typo in build config. 2019-01-14 15:23:03 +01:00
Leonard Gram
c0400f32ad release 5.4.3. 2019-01-14 14:11:58 +01:00
Leonard Gram
aaac7cc556 build: build specific enterprise version when releasing.
(cherry picked from commit b1f5a232da)
2019-01-14 14:11:58 +01:00
Leonard Gram
c2e708042c build: skip linters. 2019-01-14 14:11:58 +01:00
Leonard Gram
17f4a03d6f build: build improvements from master.
- docker for arm
- upgrade to latest golang
- automated repo deploy
2019-01-14 14:11:58 +01:00
bergquist
5743ceaa93 make sure frequency cannot be zero
frequency set to zero causes division-by-zero panics in the alert scheduler.

closes #14810

(cherry picked from commit fbb3ad5fc4)
2019-01-14 14:11:58 +01:00
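A zero frequency would make the alert scheduler's interval arithmetic divide by zero, so the fix falls back to a sane default when loading rules from the database. A minimal sketch of that guard, assuming a hypothetical helper name (the 60-second fallback matches the rule.go hunk further down):

// normalizeFrequency guards against a stored frequency of zero, which
// would otherwise cause a division-by-zero panic in the alert scheduler.
func normalizeFrequency(freq int64) int64 {
	if freq == 0 {
		return 60 // fall back to evaluating every 60 seconds
	}
	return freq
}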
bergquist
0f3a938f0b adds orgId to user dto for provisioned dashboards
(cherry picked from commit 9895b1e6d0)
2019-01-14 14:11:58 +01:00
Torkel Ödegaard
4796ec8cd4 Fixed dashboard links not updating after variable or time range change, fixes #14493
(cherry picked from commit 002f57ae00)
2019-01-14 14:11:58 +01:00
Marcus Efraimsson
a304e5b600 fix signed in user for orgId=0: result should return active org id
(cherry picked from commit e82b3632f6)
2019-01-14 14:11:58 +01:00
Baokun Lee
54b725b91b Raise datasources number to 5000
(cherry picked from commit f51222027d)
2019-01-14 14:11:58 +01:00
Dan Cech
ca88aedbdd only update session in mysql database when required
(cherry picked from commit 3f85901c4a)
2019-01-14 14:11:58 +01:00
bergquist
566c7b17ad upgrade to golang 1.11.4
(cherry picked from commit 17f8be90ae)
2019-01-14 14:11:58 +01:00
Torkel Ödegaard
d812109ebf Merge pull request #14485 from grafana/cp-v5.4.2
Cherry picks for v5.4.2
2018-12-13 13:49:23 +01:00
Torkel Ödegaard
f44a006cb9 updated version 2018-12-13 13:48:54 +01:00
Johannes Schill
43c0405ae1 Filter tags select box on text input #14437
(cherry picked from commit 6bb9415b0e)
2018-12-13 13:31:07 +01:00
Marcus Efraimsson
ed05e9de72 add oauth_auto_login setting to defaults file
Making the setting configurable thru environment variable

(cherry picked from commit b9e91cab0e)
2018-12-13 13:31:04 +01:00
Peter Holmberg
e43f13bc03 Check with lowercase
(cherry picked from commit 29bcdef104)
2018-12-13 13:30:58 +01:00
Torkel Ödegaard
308c818cd7 Merge pull request #14431 from grafana/v5.4.1-cherry-picks
v5.4.1 cherry picks
2018-12-10 15:08:32 +01:00
Marcus Efraimsson
d49d8bf13d fix time regions bugs
(cherry picked from commit 8f26fe0fbb)
2018-12-10 14:54:04 +01:00
Torkel Ödegaard
3701f22d66 bumped version number to v5.4.1 2018-12-10 14:44:52 +01:00
Torkel Ödegaard
4c60ef398a fixed issue with colorpicker position above window, fixes #14412
(cherry picked from commit 0c5fd21327)
2018-12-10 14:39:50 +01:00
Torkel Ödegaard
ec98666de1 fixed issue with singlestat and repeated scopedVars: it was only working for time series data sources, and only if there were any series; now scopedVars is always set, fixes #14367
(cherry picked from commit 976d25d6ae)
2018-12-10 14:39:08 +01:00
Torkel Ödegaard
7fe10e2eef fix search tag issues, fixes #14391
(cherry picked from commit e05f6c5397)
2018-12-10 14:38:37 +01:00
Scott Glajch
4a8cd4c023 Add the AWS/SES Cloudwatch metrics of BounceRate and ComplaintRate. Pull request #14399
(cherry picked from commit 62a5cd27ba)
2018-12-10 14:38:16 +01:00
moznion
174be1abab Put issue number into test code
(cherry picked from commit 4397ee61d0)
2018-12-10 14:37:52 +01:00
moznion
4c13e02aef Fix bug where updating user quota doesn't work
The reason is the same as 061e06c226

(cherry picked from commit d1e1cde00e)
2018-12-10 14:37:36 +01:00
moznion
4a8a3d40e7 Fix bug where updating org quota doesn't work
3c330c8e4c/pkg/services/sqlstore/quota.go (L106)

In the real use case, the `has` returned by the above code is always `false`, because `Updated` is included in the query conditions.

So this commit fixes this issue.

(cherry picked from commit 061e06c226)
2018-12-10 14:37:26 +01:00
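The root cause is xorm's `Get` semantics: every non-zero field of the condition struct becomes part of the WHERE clause, so seeding `Updated` with `time.Now()` guaranteed that the existing row never matched. A condensed sketch of the fix, mirroring the quota.go hunk further down:

// Look the quota up by identity only; Updated must stay out of the
// lookup struct, or the row never matches and `has` is always false.
quota := m.Quota{
	Target: cmd.Target,
	OrgId:  cmd.OrgId,
}
has, err := sess.Get(&quota)
if err != nil {
	return err
}
quota.Updated = time.Now() // set the timestamp after the lookup instead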
Johannes Schill
01c4b71cfb If user login equals user email, only show the email once #14341
(cherry picked from commit 02b14d33a6)
2018-12-10 14:35:42 +01:00
Johannes Schill
e7cd39a543 UserPicker and TeamPicker should use min-width instead of fixed widths to avoid overflowing form buttons. #14341
(cherry picked from commit 114a264da4)
2018-12-10 14:35:31 +01:00
Torkel Ödegaard
6f241a4bac fix for panel-initialized event not being called
(cherry picked from commit 757cada4a5)
2018-12-10 14:34:46 +01:00
Brian Gann
0a19581c48 redact value for plugin proxy routes
(cherry picked from commit 02365514f9)
2018-12-10 14:34:25 +01:00
Torkel Ödegaard
96cb4df83a fix for panel embedding. Solo panel height was not correctly set. Made panel--solo into panel-solo class. In the develop branch we have removed the need for the panel class
(cherry picked from commit e31490ac68)
2018-12-10 14:34:07 +01:00
Tom Nitti
7820775a53 added support for influxdb cumulative_sum function in tsdb
(cherry picked from commit 1e53c12921)
2018-12-10 14:33:41 +01:00
Dominik Henneke
9699133501 Use buildTableConstraint instead of buildSchemaConstraint to find the datatype of a column if using a table from a different database schema
(cherry picked from commit b450b778cb)
2018-12-10 14:32:55 +01:00
flopp999
80ecd8ea8e fixedUnit for Flow:l/min and mL/min
got strange results with decimalSIPrefix
(cherry picked from commit 8caeb13026)
2018-12-10 14:32:31 +01:00
Marcus Efraimsson
2ab2259091 fix time regions using zero hours
(cherry picked from commit ad33cd5c5c)
2018-12-10 14:32:08 +01:00
Kornelijus Survila
deb305b95f dataproxy: Override incoming Authorization header
(cherry picked from commit 0cafd9a663)
2018-12-10 14:31:28 +01:00
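The change matters because `http.Header.Add` appends a value while `Set` replaces it: with `Add`, an Authorization header supplied by the client would be forwarded to the datasource alongside the provisioned token. A minimal standalone sketch of the difference (token values are illustrative only):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Add("Authorization", "Bearer incoming-user-token")

	h.Add("Authorization", "Bearer datasource-token")
	fmt.Println(h["Authorization"]) // both values survive; the incoming token leaks through

	h = http.Header{}
	h.Add("Authorization", "Bearer incoming-user-token")
	h.Set("Authorization", "Bearer datasource-token")
	fmt.Println(h["Authorization"]) // only the datasource token remains
}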
Leonard Gram
d42c17efad build: update latest when pushing docker.
(cherry picked from commit 9a771555f3)
2018-12-04 11:17:50 +01:00
Leonard Gram
972aaef2a6 build: always test publisher.
(cherry picked from commit cb0d58c6f1)
2018-12-03 15:18:10 +01:00
Leonard Gram
ce3982d406 build: packages linked to dl.grafana.com.
(cherry picked from commit ff0730ca1a)
2018-12-03 15:18:10 +01:00
Marcus Efraimsson
69c5191926 Merge pull request #14212 from grafana/cp-5.4.0
Cherry picks for v5.4.0
2018-12-03 02:17:42 -08:00
Marcus Efraimsson
99ee3bbe5a release v5.4.0 2018-12-03 10:53:20 +01:00
Marcus Efraimsson
01840cbd70 let each sql datasource handle timeFrom and timeTo macros
(cherry picked from commit 624e5e5b3d)
2018-11-30 16:27:36 +01:00
Matthew Coltman
23b19543bd Add AWS/CodeBuild namespace for CloudWatch datasource
(cherry picked from commit bbd0ec3a8b)
2018-11-30 16:27:07 +01:00
Ryan McKinley
bb4e5934fb check for null with toLocaleString (#14208)
(cherry picked from commit b3e6da0cbd)
2018-11-28 10:23:43 +01:00
Torkel Ödegaard
fd3821d2f1 Fix elastic ng-inject (build issue) (#14195)
fix elastic ng-inject issue in query editor

(cherry picked from commit 2faf8c722f)
2018-11-28 10:23:22 +01:00
Leonard Gram
8b1d0b14b6 docker: Upgrades base packages in the images.
Related to #14182

(cherry picked from commit bccce9922a)
2018-11-28 10:22:39 +01:00
Marcus Efraimsson
23c6bea21b return actual error if failing to update alert data
(cherry picked from commit 36aec52c08)
2018-11-28 10:21:26 +01:00
Marcus Efraimsson
e3abefa19f fix handling of elasticsearch 6.0+ versions
(cherry picked from commit a022284cb0)
2018-11-28 10:20:41 +01:00
flopp999
4ee92bd59c Fix abbreviations of Litre/min and milliLitre/min (#14114)
(cherry picked from commit 91d97ab5b5)
2018-11-28 10:20:04 +01:00
Marcus Efraimsson
780e5153d0 cloudwatch: handle invalid time ranges
(cherry picked from commit 3534762f49)
2018-11-28 10:00:23 +01:00
Marcus Efraimsson
be9058d7ef cloudwatch: recover/handle panics when executing queries
(cherry picked from commit 879aed7d06)
2018-11-28 10:00:16 +01:00
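A panic inside a goroutine takes down the whole process unless that same goroutine recovers it, so each query goroutine needs its own deferred recover. A condensed sketch of the pattern, as it appears in the cloudwatch hunk further down:

eg.Go(func() error {
	defer func() {
		// recover only works inside the panicking goroutine; turn the
		// panic into a query error instead of crashing the server.
		if r := recover(); r != nil {
			if err, ok := r.(error); ok {
				resultChan <- &tsdb.QueryResult{RefId: query.RefId, Error: err}
			}
		}
	}()
	// ... execute the CloudWatch query ...
	return nil
})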
bergquist
3301f96811 updates time range options for alert queries
adds some new options for the 'to' value.
removes the '1s' option for 'from' since that's unreasonably low

closes #12134

(cherry picked from commit 14688766ef)
2018-11-28 09:59:52 +01:00
bergquist
1c59669da0 format: remove </input> and align tabs
(cherry picked from commit 60c291c8dc)
2018-11-28 09:59:45 +01:00
Torkel Ödegaard
1ad60be47b fixed failing graph tests
(cherry picked from commit 0731b8635b)
2018-11-28 09:59:10 +01:00
Torkel Ödegaard
9ec0af73ec fixed issue with new legend not checking if panel.legend.show
(cherry picked from commit cab92f88af)
2018-11-28 09:59:00 +01:00
Leonard Gram
8190d10827 build: docker build for ge.
(cherry picked from commit 6c267cb592)
2018-11-21 09:28:15 +01:00
Marcus Efraimsson
18b5f630f7 update release publish script links
(cherry picked from commit e421c387ea)
2018-11-20 16:24:59 +01:00
Marcus Efraimsson
9df26af3db release v5.4.0-beta1 2018-11-20 14:08:27 +01:00
90 changed files with 937 additions and 294 deletions

View File

@@ -19,7 +19,7 @@ version: 2
jobs:
mysql-integration-test:
docker:
- image: circleci/golang:1.11
- image: circleci/golang:1.11.4
- image: circleci/mysql:5.6-ram
environment:
MYSQL_ROOT_PASSWORD: rootpass
@@ -39,7 +39,7 @@ jobs:
postgres-integration-test:
docker:
- image: circleci/golang:1.11
- image: circleci/golang:1.11.4
- image: circleci/postgres:9.3-ram
environment:
POSTGRES_USER: grafanatest
@@ -74,7 +74,7 @@ jobs:
gometalinter:
docker:
- image: circleci/golang:1.11
- image: circleci/golang:1.11.4
environment:
# we need CGO because of go-sqlite3
CGO_ENABLED: 1
@@ -89,9 +89,6 @@ jobs:
- run: 'go get -u github.com/opennota/check/cmd/structcheck'
- run: 'go get -u github.com/mdempsky/unconvert'
- run: 'go get -u github.com/opennota/check/cmd/varcheck'
- run:
name: run linters
command: 'gometalinter --enable-gc --vendor --deadline 10m --disable-all --enable=deadcode --enable=goconst --enable=gofmt --enable=ineffassign --enable=megacheck --enable=structcheck --enable=unconvert --enable=varcheck ./...'
- run:
name: run go vet
command: 'go vet ./pkg/...'
@@ -117,7 +114,7 @@ jobs:
test-backend:
docker:
- image: circleci/golang:1.11
- image: circleci/golang:1.11.4
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
@@ -127,7 +124,7 @@ jobs:
build-all:
docker:
- image: grafana/build-container:1.2.1
- image: grafana/build-container:1.2.2
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
@@ -162,8 +159,8 @@ jobs:
name: Build Grafana.com master publisher
command: 'go build -o scripts/publish scripts/build/publish.go'
- run:
name: Build Grafana.com release publisher
command: 'cd scripts/build/release_publisher && go build -o release_publisher .'
name: Test and build Grafana.com release publisher
command: 'cd scripts/build/release_publisher && go test . && go build -o release_publisher .'
- persist_to_workspace:
root: .
paths:
@@ -175,7 +172,7 @@ jobs:
build:
docker:
- image: grafana/build-container:1.2.1
- image: grafana/build-container:1.2.2
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
@@ -191,57 +188,60 @@ jobs:
- run:
name: sha-sum packages
command: 'go run build.go sha-dist'
- run:
name: Test Grafana.com release publisher
command: 'cd scripts/build/release_publisher && go test .'
- persist_to_workspace:
root: .
paths:
- dist/grafana*
grafana-docker-master:
docker:
- image: docker:stable-git
machine:
image: circleci/classic:201808-01
steps:
- checkout
- attach_workspace:
at: .
- setup_remote_docker
- run: docker info
- run: cp dist/grafana-latest.linux-x64.tar.gz packaging/docker
- run: docker run --privileged linuxkit/binfmt:v0.6
- run: cp dist/grafana-latest.linux-*.tar.gz packaging/docker
- run: cd packaging/docker && ./build-deploy.sh "master-${CIRCLE_SHA1}"
- run: rm packaging/docker/grafana-latest.linux-x64.tar.gz
- run: rm packaging/docker/grafana-latest.linux-*.tar.gz
- run: cp enterprise-dist/grafana-enterprise-*.linux-amd64.tar.gz packaging/docker/grafana-latest.linux-x64.tar.gz
- run: cd packaging/docker && ./build-enterprise.sh "master"
grafana-docker-pr:
docker:
- image: docker:stable-git
machine:
image: circleci/classic:201808-01
steps:
- checkout
- attach_workspace:
at: .
- setup_remote_docker
- run: docker info
- run: cp dist/grafana-latest.linux-x64.tar.gz packaging/docker
- run: docker run --privileged linuxkit/binfmt:v0.6
- run: cp dist/grafana-latest.linux-*.tar.gz packaging/docker
- run: cd packaging/docker && ./build.sh "${CIRCLE_SHA1}"
grafana-docker-release:
docker:
- image: docker:stable-git
steps:
- checkout
- attach_workspace:
at: .
- setup_remote_docker
- run: docker info
- run: cp dist/grafana-latest.linux-x64.tar.gz packaging/docker
- run: cd packaging/docker && ./build-deploy.sh "${CIRCLE_TAG}"
- run: rm packaging/docker/grafana-latest.linux-x64.tar.gz
- run: cp enterprise-dist/grafana-enterprise-*.linux-amd64.tar.gz packaging/docker/grafana-latest.linux-x64.tar.gz
- run: cd packaging/docker && ./build-enterprise.sh "${CIRCLE_TAG}"
machine:
image: circleci/classic:201808-01
steps:
- checkout
- attach_workspace:
at: .
- run: docker info
- run: docker run --privileged linuxkit/binfmt:v0.6
- run: cp dist/grafana-latest.linux-*.tar.gz packaging/docker
- run: cd packaging/docker && ./build-deploy.sh "${CIRCLE_TAG}"
- run: rm packaging/docker/grafana-latest.linux-*.tar.gz
- run: cp enterprise-dist/grafana-enterprise-*.linux-amd64.tar.gz packaging/docker/grafana-latest.linux-x64.tar.gz
- run: cd packaging/docker && ./build-enterprise.sh "${CIRCLE_TAG}"
build-enterprise:
docker:
- image: grafana/build-container:1.2.1
- image: grafana/build-container:1.2.2
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
@@ -273,7 +273,7 @@ jobs:
build-all-enterprise:
docker:
- image: grafana/build-container:1.2.1
- image: grafana/build-container:1.2.2
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
@@ -320,7 +320,7 @@ jobs:
deploy-enterprise-master:
docker:
- image: grafana/grafana-ci-deploy:1.0.0
- image: grafana/grafana-ci-deploy:1.1.0
steps:
- attach_workspace:
at: .
@@ -343,7 +343,7 @@ jobs:
deploy-enterprise-release:
docker:
- image: grafana/grafana-ci-deploy:1.0.0
- image: grafana/grafana-ci-deploy:1.1.0
steps:
- attach_workspace:
at: .
@@ -362,10 +362,20 @@ jobs:
- run:
name: Deploy to Grafana.com
command: './scripts/build/publish.sh --enterprise'
- run:
name: Load GPG private key
command: './scripts/build/load-signing-key.sh'
- run:
name: Update Debian repository
command: './scripts/build/update_repo/update-deb.sh "enterprise" "$GPG_KEY_PASSWORD" "$CIRCLE_TAG"'
- run:
name: Update RPM repository
command: './scripts/build/update_repo/update-rpm.sh "enterprise" "$GPG_KEY_PASSWORD" "$CIRCLE_TAG"'
deploy-master:
docker:
- image: grafana/grafana-ci-deploy:1.0.0
- image: grafana/grafana-ci-deploy:1.1.0
steps:
- attach_workspace:
at: .
@@ -395,8 +405,9 @@ jobs:
deploy-release:
docker:
- image: grafana/grafana-ci-deploy:1.0.0
- image: grafana/grafana-ci-deploy:1.1.0
steps:
- checkout
- attach_workspace:
at: .
- run:
@@ -414,6 +425,15 @@ jobs:
- run:
name: Deploy to Grafana.com
command: './scripts/build/publish.sh'
- run:
name: Load GPG private key
command: './scripts/build/load-signing-key.sh'
- run:
name: Update Debian repository
command: './scripts/build/update_repo/update-deb.sh "oss" "$GPG_KEY_PASSWORD" "$CIRCLE_TAG"'
- run:
name: Update RPM repository
command: './scripts/build/update_repo/update-rpm.sh "oss" "$GPG_KEY_PASSWORD" "$CIRCLE_TAG"'
workflows:
version: 2
@@ -510,6 +530,7 @@ workflows:
- grafana-docker-release:
requires:
- build-all
- build-all-enterprise
- test-backend
- test-frontend
- codespell

View File

@@ -1,5 +1,5 @@
# Golang build container
FROM golang:1.11
FROM golang:1.11.4
WORKDIR $GOPATH/src/github.com/grafana/grafana
@@ -50,7 +50,8 @@ ENV PATH=/usr/share/grafana/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bi
WORKDIR $GF_PATHS_HOME
RUN apt-get update && apt-get install -qq -y libfontconfig ca-certificates && \
RUN apt-get update && apt-get upgrade -y && \
apt-get install -qq -y libfontconfig ca-certificates && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/*

View File

@@ -7,7 +7,7 @@ clone_folder: c:\gopath\src\github.com\grafana\grafana
environment:
nodejs_version: "8"
GOPATH: C:\gopath
GOVERSION: 1.11
GOVERSION: 1.11.4
install:
- rmdir c:\go /s /q

View File

@@ -164,6 +164,8 @@ func makeLatestDistCopies() {
"_amd64.deb": "dist/grafana_latest_amd64.deb",
".x86_64.rpm": "dist/grafana-latest-1.x86_64.rpm",
".linux-amd64.tar.gz": "dist/grafana-latest.linux-x64.tar.gz",
".linux-armv7.tar.gz": "dist/grafana-latest.linux-armv7.tar.gz",
".linux-arm64.tar.gz": "dist/grafana-latest.linux-arm64.tar.gz",
}
for _, file := range files {

View File

@@ -246,6 +246,10 @@ disable_signout_menu = false
# URL to redirect the user to after sign out
signout_redirect_url =
# Set to true to attempt login with OAuth automatically, skipping the login screen.
# This setting is ignored if multiple OAuth providers are configured.
oauth_auto_login = false
#################################### Anonymous Auth ######################
[auth.anonymous]
# enable anonymous access

View File

@@ -223,6 +223,10 @@ log_queries =
# URL to redirect the user to after sign out
;signout_redirect_url =
# Set to true to attempt login with OAuth automatically, skipping the login screen.
# This setting is ignored if multiple OAuth providers are configured.
;oauth_auto_login = false
#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access

View File

@@ -133,9 +133,9 @@ Macro example | Description
------------ | -------------
*$__time(dateColumn)* | Will be replaced by an expression to convert to a UNIX timestamp and rename the column to `time_sec`. For example, *UNIX_TIMESTAMP(dateColumn) as time_sec*
*$__timeEpoch(dateColumn)* | Will be replaced by an expression to convert to a UNIX timestamp and rename the column to `time_sec`. For example, *UNIX_TIMESTAMP(dateColumn) as time_sec*
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn BETWEEN '2017-04-21T05:01:17Z' AND '2017-04-21T05:06:17Z'*
*$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *'2017-04-21T05:01:17Z'*
*$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *'2017-04-21T05:06:17Z'*
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn BETWEEN FROM_UNIXTIME(1494410783) AND FROM_UNIXTIME(1494410983)*
*$__timeFrom()* | Will be replaced by the start of the currently active time selection. For example, *FROM_UNIXTIME(1494410783)*
*$__timeTo()* | Will be replaced by the end of the currently active time selection. For example, *FROM_UNIXTIME(1494410983)*
*$__timeGroup(dateColumn,'5m')* | Will be replaced by an expression usable in GROUP BY clause. For example, *cast(cast(UNIX_TIMESTAMP(dateColumn)/(300) as signed)*300 as signed),*
*$__timeGroup(dateColumn,'5m', 0)* | Same as above but with a fill parameter so missing points in that series will be added by grafana and 0 will be used as value.
*$__timeGroup(dateColumn,'5m', NULL)* | Same as above but NULL will be used as value for missing points.
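For illustration, the updated macros now interpolate as in this sketch (names taken from the mysql macro engine tests further down; the epoch values match the examples in the table above):

sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(), $__timeTo()")
// err == nil
// sql == "select FROM_UNIXTIME(1494410783), FROM_UNIXTIME(1494410983)" for that time range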

View File

@@ -4,7 +4,7 @@
"company": "Grafana Labs"
},
"name": "grafana",
"version": "5.4.0-pre1",
"version": "5.4.3",
"repository": {
"type": "git",
"url": "http://github.com/grafana/grafana.git"

View File

@@ -1,4 +1,5 @@
FROM debian:stretch-slim
ARG BASE_IMAGE=debian:stretch-slim
FROM ${BASE_IMAGE}
ARG GRAFANA_TGZ="grafana-latest.linux-x64.tar.gz"
@@ -10,7 +11,8 @@ COPY ${GRAFANA_TGZ} /tmp/grafana.tar.gz
RUN mkdir /tmp/grafana && tar xfvz /tmp/grafana.tar.gz --strip-components=1 -C /tmp/grafana
FROM debian:stretch-slim
ARG BASE_IMAGE=debian:stretch-slim
FROM ${BASE_IMAGE}
ARG GF_UID="472"
ARG GF_GID="472"
@@ -25,7 +27,8 @@ ENV PATH=/usr/share/grafana/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bi
WORKDIR $GF_PATHS_HOME
RUN apt-get update && apt-get install -qq -y libfontconfig ca-certificates curl && \
RUN apt-get update && apt-get -y upgrade && \
apt-get install -qq -y libfontconfig ca-certificates curl && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/*

View File

@@ -8,6 +8,5 @@ docker login -u "$DOCKER_USER" -p "$DOCKER_PASS"
./push_to_docker_hub.sh "$_grafana_version"
if echo "$_grafana_version" | grep -q "^master-"; then
apk add --no-cache curl
./deploy_to_k8s.sh "grafana/grafana-dev:$_grafana_version"
fi

View File

@@ -1,12 +1,25 @@
#!/bin/sh
set -e
_grafana_tag=$1
_raw_grafana_tag=$1
_docker_repo=${2:-grafana/grafana-enterprise}
if echo "$_raw_grafana_tag" | grep -q "^v"; then
_grafana_tag=$(echo "${_raw_grafana_tag}" | cut -d "v" -f 2)
else
_grafana_tag="${_raw_grafana_tag}"
fi
echo "Building and deploying ${_docker_repo}:${_grafana_tag}"
docker build \
--tag "${_docker_repo}:${_grafana_tag}"\
--no-cache=true \
.
docker push "${_docker_repo}:${_grafana_tag}"
if echo "$_raw_grafana_tag" | grep -q "^v" && echo "$_raw_grafana_tag" | grep -qv "beta"; then
docker tag "${_docker_repo}:${_grafana_tag}" "${_docker_repo}:latest"
docker push "${_docker_repo}:latest"
fi

View File

@@ -1,25 +1,49 @@
#!/bin/sh
_grafana_tag=$1
_grafana_tag=${1:-}
_docker_repo=${2:-grafana/grafana}
# If the tag starts with v, treat this as an official release
if echo "$_grafana_tag" | grep -q "^v"; then
_grafana_version=$(echo "${_grafana_tag}" | cut -d "v" -f 2)
_docker_repo=${2:-grafana/grafana}
else
_grafana_version=$_grafana_tag
_docker_repo=${2:-grafana/grafana-dev}
fi
echo "Building ${_docker_repo}:${_grafana_version}"
docker build \
--tag "${_docker_repo}:${_grafana_version}" \
--no-cache=true .
export DOCKER_CLI_EXPERIMENTAL=enabled
# Build grafana image for a specific arch
docker_build () {
base_image=$1
grafana_tgz=$2
tag=$3
docker build \
--build-arg BASE_IMAGE=${base_image} \
--build-arg GRAFANA_TGZ=${grafana_tgz} \
--tag "${tag}" \
--no-cache=true .
}
# Tag docker images of all architectures
docker_tag_all () {
repo=$1
tag=$2
docker tag "${_docker_repo}:${_grafana_version}" "${repo}:${tag}"
docker tag "${_docker_repo}-arm32v7-linux:${_grafana_version}" "${repo}-arm32v7-linux:${tag}"
docker tag "${_docker_repo}-arm64v8-linux:${_grafana_version}" "${repo}-arm64v8-linux:${tag}"
}
docker_build "debian:stretch-slim" "grafana-latest.linux-x64.tar.gz" "${_docker_repo}:${_grafana_version}"
docker_build "arm32v7/debian:stretch-slim" "grafana-latest.linux-armv7.tar.gz" "${_docker_repo}-arm32v7-linux:${_grafana_version}"
docker_build "arm64v8/debian:stretch-slim" "grafana-latest.linux-arm64.tar.gz" "${_docker_repo}-arm64v8-linux:${_grafana_version}"
# Tag as 'latest' for official release; otherwise tag as grafana/grafana:master
if echo "$_grafana_tag" | grep -q "^v"; then
docker tag "${_docker_repo}:${_grafana_version}" "${_docker_repo}:latest"
docker_tag_all "${_docker_repo}" "latest"
else
docker tag "${_docker_repo}:${_grafana_version}" "grafana/grafana:master"
docker_tag_all "${_docker_repo}" "master"
docker tag "${_docker_repo}:${_grafana_version}" "grafana/grafana-dev:${_grafana_version}"
fi

View File

@@ -1,24 +1,46 @@
#!/bin/sh
set -e
_grafana_tag=$1
_grafana_tag=${1:-}
_docker_repo=${2:-grafana/grafana}
# If the tag starts with v, treat this as an official release
if echo "$_grafana_tag" | grep -q "^v"; then
_grafana_version=$(echo "${_grafana_tag}" | cut -d "v" -f 2)
_docker_repo=${2:-grafana/grafana}
else
_grafana_version=$_grafana_tag
_docker_repo=${2:-grafana/grafana-dev}
fi
export DOCKER_CLI_EXPERIMENTAL=enabled
echo "pushing ${_docker_repo}:${_grafana_version}"
docker push "${_docker_repo}:${_grafana_version}"
docker_push_all () {
repo=$1
tag=$2
# Push each image individually
docker push "${repo}:${tag}"
docker push "${repo}-arm32v7-linux:${tag}"
docker push "${repo}-arm64v8-linux:${tag}"
# Create and push a multi-arch manifest
docker manifest create "${repo}:${tag}" \
"${repo}:${tag}" \
"${repo}-arm32v7-linux:${tag}" \
"${repo}-arm64v8-linux:${tag}"
docker manifest push "${repo}:${tag}"
}
if echo "$_grafana_tag" | grep -q "^v" && echo "$_grafana_tag" | grep -vq "beta"; then
echo "pushing ${_docker_repo}:latest"
docker push "${_docker_repo}:latest"
docker_push_all "${_docker_repo}" "latest"
docker_push_all "${_docker_repo}" "${_grafana_version}"
elif echo "$_grafana_tag" | grep -q "^v" && echo "$_grafana_tag" | grep -q "beta"; then
docker_push_all "${_docker_repo}" "${_grafana_version}"
elif echo "$_grafana_tag" | grep -q "master"; then
echo "pushing grafana/grafana:master"
docker push grafana/grafana:master
docker_push_all "${_docker_repo}" "master"
docker push "grafana/grafana-dev:${_grafana_version}"
fi

View File

@@ -1,17 +1,17 @@
#! /usr/bin/env bash
version=5.0.2
version=5.4.3
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb
# wget https://dl.grafana.com/oss/release/grafana_${version}_amd64.deb
#
# package_cloud push grafana/stable/debian/jessie grafana_${version}_amd64.deb
# package_cloud push grafana/stable/debian/wheezy grafana_${version}_amd64.deb
# package_cloud push grafana/stable/debian/stretch grafana_${version}_amd64.deb
#
# package_cloud push grafana/testing/debian/jessie grafana_${version}_amd64.deb
# package_cloud push grafana/testing/debian/wheezy grafana_${version}_amd64.deb --verbose
# package_cloud push grafana/testing/debian/stretch grafana_${version}_amd64.deb --verbose
package_cloud push grafana/stable/debian/jessie grafana_${version}_amd64.deb
package_cloud push grafana/stable/debian/wheezy grafana_${version}_amd64.deb
package_cloud push grafana/stable/debian/stretch grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/jessie grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/wheezy grafana_${version}_amd64.deb --verbose
package_cloud push grafana/testing/debian/stretch grafana_${version}_amd64.deb --verbose
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-${version}-1.x86_64.rpm
wget https://dl.grafana.com/oss/release/grafana-${version}-1.x86_64.rpm
package_cloud push grafana/testing/el/6 grafana-${version}-1.x86_64.rpm --verbose
package_cloud push grafana/testing/el/7 grafana-${version}-1.x86_64.rpm --verbose

View File

@@ -277,10 +277,6 @@ func PostDashboard(c *m.ReqContext, cmd m.SaveDashboardCommand) Response {
return Error(500, "Failed to save dashboard", err)
}
if err == m.ErrDashboardFailedToUpdateAlertData {
return Error(500, "Invalid alert data. Cannot save dashboard", err)
}
c.TimeRequest(metrics.M_Api_Dashboard_Save)
return JSON(200, util.DynMap{
"status": "success",

View File

@@ -727,7 +727,6 @@ func TestDashboardApiEndpoint(t *testing.T) {
{SaveError: m.ErrDashboardTitleEmpty, ExpectedStatusCode: 400},
{SaveError: m.ErrDashboardFolderCannotHaveParent, ExpectedStatusCode: 400},
{SaveError: alerting.ValidationError{Reason: "Mu"}, ExpectedStatusCode: 422},
{SaveError: m.ErrDashboardFailedToUpdateAlertData, ExpectedStatusCode: 500},
{SaveError: m.ErrDashboardFailedGenerateUniqueUid, ExpectedStatusCode: 500},
{SaveError: m.ErrDashboardTypeMismatch, ExpectedStatusCode: 400},
{SaveError: m.ErrDashboardFolderWithSameNameAsDashboard, ExpectedStatusCode: 400},

View File

@@ -51,7 +51,7 @@ func ApplyRoute(ctx context.Context, req *http.Request, proxyPath string, route
if token, err := tokenProvider.getAccessToken(data); err != nil {
logger.Error("Failed to get access token", "error", err)
} else {
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}
@@ -60,7 +60,7 @@ func ApplyRoute(ctx context.Context, req *http.Request, proxyPath string, route
if token, err := tokenProvider.getJwtAccessToken(ctx, data); err != nil {
logger.Error("Failed to get access token", "error", err)
} else {
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}
@@ -73,7 +73,7 @@ func ApplyRoute(ctx context.Context, req *http.Request, proxyPath string, route
if err != nil {
logger.Error("Failed to get default access token from meta data server", "error", err)
} else {
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.AccessToken))
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.AccessToken))
}
}
}

View File

@@ -87,7 +87,7 @@ func NewApiPluginProxy(ctx *m.ReqContext, proxyPath string, route *plugins.AppPl
}
for key, value := range headers {
log.Trace("setting key %v value %v", key, value[0])
log.Trace("setting key %v value <redacted>", key)
req.Header.Set(key, value[0])
}
}

View File

@@ -21,7 +21,6 @@ var (
ErrDashboardVersionMismatch = errors.New("The dashboard has been changed by someone else")
ErrDashboardTitleEmpty = errors.New("Dashboard title cannot be empty")
ErrDashboardFolderCannotHaveParent = errors.New("A Dashboard Folder cannot be added to another folder")
ErrDashboardFailedToUpdateAlertData = errors.New("Failed to save alert data")
ErrDashboardsWithSameSlugExists = errors.New("Multiple dashboards with the same slug exists")
ErrDashboardFailedGenerateUniqueUid = errors.New("Failed to generate unique dashboard id")
ErrDashboardTypeMismatch = errors.New("Dashboard cannot be changed to a folder")

View File

@@ -112,7 +112,7 @@ func (e *DashAlertExtractor) getAlertFromPanels(jsonWithPanels *simplejson.Json,
frequency, err := getTimeDurationStringToSeconds(jsonAlert.Get("frequency").MustString())
if err != nil {
return nil, ValidationError{Reason: "Could not parse frequency"}
return nil, ValidationError{Reason: err.Error()}
}
rawFor := jsonAlert.Get("for").MustString()

View File

@@ -1,16 +1,21 @@
package alerting
import (
"errors"
"fmt"
"regexp"
"strconv"
"time"
"github.com/grafana/grafana/pkg/components/simplejson"
m "github.com/grafana/grafana/pkg/models"
)
var (
ErrFrequencyCannotBeZeroOrLess = errors.New(`"evaluate every" cannot be zero or below`)
ErrFrequencyCouldNotBeParsed = errors.New(`"evaluate every" field could not be parsed`)
)
type Rule struct {
Id int64
OrgId int64
@@ -76,7 +81,7 @@ func getTimeDurationStringToSeconds(str string) (int64, error) {
matches := ValueFormatRegex.FindAllString(str, 1)
if len(matches) <= 0 {
return 0, fmt.Errorf("Frequency could not be parsed")
return 0, ErrFrequencyCouldNotBeParsed
}
value, err := strconv.Atoi(matches[0])
@@ -84,6 +89,10 @@ func getTimeDurationStringToSeconds(str string) (int64, error) {
return 0, err
}
if value == 0 {
return 0, ErrFrequencyCannotBeZeroOrLess
}
unit := UnitFormatRegex.FindAllString(str, 1)[0]
if val, ok := unitMultiplier[unit]; ok {
@@ -101,7 +110,6 @@ func NewRuleFromDBAlert(ruleDef *m.Alert) (*Rule, error) {
model.PanelId = ruleDef.PanelId
model.Name = ruleDef.Name
model.Message = ruleDef.Message
model.Frequency = ruleDef.Frequency
model.State = ruleDef.State
model.LastStateChange = ruleDef.NewStateDate
model.For = ruleDef.For
@@ -109,6 +117,13 @@ func NewRuleFromDBAlert(ruleDef *m.Alert) (*Rule, error) {
model.ExecutionErrorState = m.ExecutionErrorOption(ruleDef.Settings.Get("executionErrorState").MustString("alerting"))
model.StateChanges = ruleDef.StateChanges
model.Frequency = ruleDef.Frequency
// frequency cannot be zero since that would not execute the alert rule.
// so we fall back to 60 seconds if `Frequency` is missing
if model.Frequency == 0 {
model.Frequency = 60
}
for _, v := range ruleDef.Settings.Get("notifications").MustArray() {
jsonModel := simplejson.NewFromAny(v)
id, err := jsonModel.Get("id").Int64()

View File

@@ -14,6 +14,36 @@ func (f *FakeCondition) Eval(context *EvalContext) (*ConditionResult, error) {
return &ConditionResult{}, nil
}
func TestAlertRuleFrequencyParsing(t *testing.T) {
tcs := []struct {
input string
err error
result int64
}{
{input: "10s", result: 10},
{input: "10m", result: 600},
{input: "1h", result: 3600},
{input: "1o", result: 1},
{input: "0s", err: ErrFrequencyCannotBeZeroOrLess},
{input: "0m", err: ErrFrequencyCannotBeZeroOrLess},
{input: "0h", err: ErrFrequencyCannotBeZeroOrLess},
{input: "0", err: ErrFrequencyCannotBeZeroOrLess},
{input: "-1s", err: ErrFrequencyCouldNotBeParsed},
}
for _, tc := range tcs {
r, err := getTimeDurationStringToSeconds(tc.input)
if err != tc.err {
t.Errorf("expected error: '%v' got: '%v'", tc.err, err)
return
}
if r != tc.result {
t.Errorf("expected result: %d got %d", tc.result, r)
}
}
}
func TestAlertRuleModel(t *testing.T) {
Convey("Testing alert rule", t, func() {
@@ -21,26 +51,6 @@ func TestAlertRuleModel(t *testing.T) {
return &FakeCondition{}, nil
})
Convey("Can parse seconds", func() {
seconds, _ := getTimeDurationStringToSeconds("10s")
So(seconds, ShouldEqual, 10)
})
Convey("Can parse minutes", func() {
seconds, _ := getTimeDurationStringToSeconds("10m")
So(seconds, ShouldEqual, 600)
})
Convey("Can parse hours", func() {
seconds, _ := getTimeDurationStringToSeconds("1h")
So(seconds, ShouldEqual, 3600)
})
Convey("defaults to seconds", func() {
seconds, _ := getTimeDurationStringToSeconds("1o")
So(seconds, ShouldEqual, 1)
})
Convey("should return err for empty string", func() {
_, err := getTimeDurationStringToSeconds("")
So(err, ShouldNotBeNil)
@@ -89,5 +99,35 @@ func TestAlertRuleModel(t *testing.T) {
So(len(alertRule.Notifications), ShouldEqual, 2)
})
})
Convey("can construct alert rule model with invalid frequency", func() {
json := `
{
"name": "name2",
"description": "desc2",
"noDataMode": "critical",
"enabled": true,
"frequency": "0s",
"conditions": [ { "type": "test", "prop": 123 } ],
"notifications": []
}`
alertJSON, jsonErr := simplejson.NewJson([]byte(json))
So(jsonErr, ShouldBeNil)
alert := &m.Alert{
Id: 1,
OrgId: 1,
DashboardId: 1,
PanelId: 1,
Frequency: 0,
Settings: alertJSON,
}
alertRule, err := NewRuleFromDBAlert(alert)
So(err, ShouldBeNil)
So(alertRule.Frequency, ShouldEqual, 60)
})
})
}

View File

@@ -165,7 +165,7 @@ func (dr *dashboardServiceImpl) updateAlerting(cmd *models.SaveDashboardCommand,
}
if err := bus.Dispatch(&alertCmd); err != nil {
return models.ErrDashboardFailedToUpdateAlertData
return err
}
return nil
@@ -175,7 +175,9 @@ func (dr *dashboardServiceImpl) SaveProvisionedDashboard(dto *SaveDashboardDTO,
dto.User = &models.SignedInUser{
UserId: 0,
OrgRole: models.ROLE_ADMIN,
OrgId: dto.OrgId,
}
cmd, err := dr.buildSaveDashboardCommand(dto, true, false)
if err != nil {
return nil, err

View File

@@ -29,18 +29,22 @@ import (
// MysqlStore represents a mysql session store implementation.
type MysqlStore struct {
c *sql.DB
sid string
lock sync.RWMutex
data map[interface{}]interface{}
c *sql.DB
sid string
lock sync.RWMutex
data map[interface{}]interface{}
expiry int64
dirty bool
}
// NewMysqlStore creates and returns a mysql session store.
func NewMysqlStore(c *sql.DB, sid string, kv map[interface{}]interface{}) *MysqlStore {
func NewMysqlStore(c *sql.DB, sid string, kv map[interface{}]interface{}, expiry int64) *MysqlStore {
return &MysqlStore{
c: c,
sid: sid,
data: kv,
c: c,
sid: sid,
data: kv,
expiry: expiry,
dirty: false,
}
}
@@ -50,6 +54,7 @@ func (s *MysqlStore) Set(key, val interface{}) error {
defer s.lock.Unlock()
s.data[key] = val
s.dirty = true
return nil
}
@@ -67,6 +72,7 @@ func (s *MysqlStore) Delete(key interface{}) error {
defer s.lock.Unlock()
delete(s.data, key)
s.dirty = true
return nil
}
@@ -77,13 +83,20 @@ func (s *MysqlStore) ID() string {
// Release releases resource and save data to provider.
func (s *MysqlStore) Release() error {
newExpiry := time.Now().Unix()
if !s.dirty && (s.expiry+60) >= newExpiry {
return nil
}
data, err := session.EncodeGob(s.data)
if err != nil {
return err
}
_, err = s.c.Exec("UPDATE session SET data=?, expiry=? WHERE `key`=?",
data, time.Now().Unix(), s.sid)
data, newExpiry, s.sid)
s.dirty = false
s.expiry = newExpiry
return err
}
@@ -93,6 +106,7 @@ func (s *MysqlStore) Flush() error {
defer s.lock.Unlock()
s.data = make(map[interface{}]interface{})
s.dirty = true
return nil
}
@@ -117,11 +131,12 @@ func (p *MysqlProvider) Init(expire int64, connStr string) (err error) {
// Read returns raw session store by session ID.
func (p *MysqlProvider) Read(sid string) (session.RawStore, error) {
expiry := time.Now().Unix()
var data []byte
err := p.c.QueryRow("SELECT data FROM session WHERE `key`=?", sid).Scan(&data)
err := p.c.QueryRow("SELECT data,expiry FROM session WHERE `key`=?", sid).Scan(&data, &expiry)
if err == sql.ErrNoRows {
_, err = p.c.Exec("INSERT INTO session(`key`,data,expiry) VALUES(?,?,?)",
sid, "", time.Now().Unix())
sid, "", expiry)
}
if err != nil {
return nil, err
@@ -137,7 +152,7 @@ func (p *MysqlProvider) Read(sid string) (session.RawStore, error) {
}
}
return NewMysqlStore(p.c, sid, kv), nil
return NewMysqlStore(p.c, sid, kv, expiry), nil
}
// Exist returns true if session with given ID exists.

View File

@@ -53,14 +53,14 @@ func GetDataSourceByName(query *m.GetDataSourceByNameQuery) error {
}
func GetDataSources(query *m.GetDataSourcesQuery) error {
sess := x.Limit(1000, 0).Where("org_id=?", query.OrgId).Asc("name")
sess := x.Limit(5000, 0).Where("org_id=?", query.OrgId).Asc("name")
query.Result = make([]*m.DataSource, 0)
return sess.Find(&query.Result)
}
func GetAllDataSources(query *m.GetAllDataSourcesQuery) error {
sess := x.Limit(1000, 0).Asc("name")
sess := x.Limit(5000, 0).Asc("name")
query.Result = make([]*m.DataSource, 0)
return sess.Find(&query.Result)

View File

@@ -99,14 +99,14 @@ func UpdateOrgQuota(cmd *m.UpdateOrgQuotaCmd) error {
return inTransaction(func(sess *DBSession) error {
//Check if quota is already defined in the DB
quota := m.Quota{
Target: cmd.Target,
OrgId: cmd.OrgId,
Updated: time.Now(),
Target: cmd.Target,
OrgId: cmd.OrgId,
}
has, err := sess.Get(&quota)
if err != nil {
return err
}
quota.Updated = time.Now()
quota.Limit = cmd.Limit
if !has {
quota.Created = time.Now()
@@ -201,14 +201,14 @@ func UpdateUserQuota(cmd *m.UpdateUserQuotaCmd) error {
return inTransaction(func(sess *DBSession) error {
//Check if quota is already defined in the DB
quota := m.Quota{
Target: cmd.Target,
UserId: cmd.UserId,
Updated: time.Now(),
Target: cmd.Target,
UserId: cmd.UserId,
}
has, err := sess.Get(&quota)
if err != nil {
return err
}
quota.Updated = time.Now()
quota.Limit = cmd.Limit
if !has {
quota.Created = time.Now()

View File

@@ -2,6 +2,7 @@ package sqlstore
import (
"testing"
"time"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/setting"
@@ -168,5 +169,69 @@ func TestQuotaCommandsAndQueries(t *testing.T) {
So(query.Result.Limit, ShouldEqual, 5)
So(query.Result.Used, ShouldEqual, 1)
})
// related: https://github.com/grafana/grafana/issues/14342
Convey("Should org quota updating is successful even if it called multiple time", func() {
orgCmd := m.UpdateOrgQuotaCmd{
OrgId: orgId,
Target: "org_user",
Limit: 5,
}
err := UpdateOrgQuota(&orgCmd)
So(err, ShouldBeNil)
query := m.GetOrgQuotaByTargetQuery{OrgId: orgId, Target: "org_user", Default: 1}
err = GetOrgQuotaByTarget(&query)
So(err, ShouldBeNil)
So(query.Result.Limit, ShouldEqual, 5)
// XXX: resolution of the `Updated` column is 1 sec, so sleep to get a distinct timestamp
time.Sleep(1 * time.Second)
orgCmd = m.UpdateOrgQuotaCmd{
OrgId: orgId,
Target: "org_user",
Limit: 10,
}
err = UpdateOrgQuota(&orgCmd)
So(err, ShouldBeNil)
query = m.GetOrgQuotaByTargetQuery{OrgId: orgId, Target: "org_user", Default: 1}
err = GetOrgQuotaByTarget(&query)
So(err, ShouldBeNil)
So(query.Result.Limit, ShouldEqual, 10)
})
// related: https://github.com/grafana/grafana/issues/14342
Convey("Should user quota updating is successful even if it called multiple time", func() {
userQuotaCmd := m.UpdateUserQuotaCmd{
UserId: userId,
Target: "org_user",
Limit: 5,
}
err := UpdateUserQuota(&userQuotaCmd)
So(err, ShouldBeNil)
query := m.GetUserQuotaByTargetQuery{UserId: userId, Target: "org_user", Default: 1}
err = GetUserQuotaByTarget(&query)
So(err, ShouldBeNil)
So(query.Result.Limit, ShouldEqual, 5)
// XXX: resolution of the `Updated` column is 1 sec, so sleep to get a distinct timestamp
time.Sleep(1 * time.Second)
userQuotaCmd = m.UpdateUserQuotaCmd{
UserId: userId,
Target: "org_user",
Limit: 10,
}
err = UpdateUserQuota(&userQuotaCmd)
So(err, ShouldBeNil)
query = m.GetUserQuotaByTargetQuery{UserId: userId, Target: "org_user", Default: 1}
err = GetUserQuotaByTarget(&query)
So(err, ShouldBeNil)
So(query.Result.Limit, ShouldEqual, 10)
})
})
}

View File

@@ -345,8 +345,12 @@ func GetUserOrgList(query *m.GetUserOrgListQuery) error {
return err
}
func newSignedInUserCacheKey(orgID, userID int64) string {
return fmt.Sprintf("signed-in-user-%d-%d", userID, orgID)
}
func (ss *SqlStore) GetSignedInUserWithCache(query *m.GetSignedInUserQuery) error {
cacheKey := fmt.Sprintf("signed-in-user-%d-%d", query.UserId, query.OrgId)
cacheKey := newSignedInUserCacheKey(query.OrgId, query.UserId)
if cached, found := ss.CacheService.Get(cacheKey); found {
query.Result = cached.(*m.SignedInUser)
return nil
@@ -357,6 +361,7 @@ func (ss *SqlStore) GetSignedInUserWithCache(query *m.GetSignedInUserQuery) erro
return err
}
cacheKey = newSignedInUserCacheKey(query.Result.OrgId, query.UserId)
ss.CacheService.Set(cacheKey, query.Result, time.Second*5)
return nil
}

View File

@@ -13,7 +13,7 @@ import (
func TestUserDataAccess(t *testing.T) {
Convey("Testing DB", t, func() {
InitTestDB(t)
ss := InitTestDB(t)
Convey("Creating a user", func() {
cmd := &m.CreateUserCommand{
@@ -153,6 +153,27 @@ func TestUserDataAccess(t *testing.T) {
So(prefsQuery.Result.UserId, ShouldEqual, 0)
})
})
Convey("when retreiving signed in user for orgId=0 result should return active org id", func() {
ss.CacheService.Flush()
query := &m.GetSignedInUserQuery{OrgId: users[1].OrgId, UserId: users[1].Id}
err := ss.GetSignedInUserWithCache(query)
So(err, ShouldBeNil)
So(query.Result, ShouldNotBeNil)
So(query.OrgId, ShouldEqual, users[1].OrgId)
err = SetUsingOrg(&m.SetUsingOrgCommand{UserId: users[1].Id, OrgId: users[0].OrgId})
So(err, ShouldBeNil)
query = &m.GetSignedInUserQuery{OrgId: 0, UserId: users[1].Id}
err = ss.GetSignedInUserWithCache(query)
So(err, ShouldBeNil)
So(query.Result, ShouldNotBeNil)
So(query.Result.OrgId, ShouldEqual, users[0].OrgId)
cacheKey := newSignedInUserCacheKey(query.Result.OrgId, query.UserId)
_, found := ss.CacheService.Get(cacheKey)
So(found, ShouldBeTrue)
})
})
})
})

View File

@@ -126,6 +126,18 @@ func (e *CloudWatchExecutor) executeTimeSeriesQuery(ctx context.Context, queryCo
}
eg.Go(func() error {
defer func() {
if err := recover(); err != nil {
plog.Error("Execute Query Panic", "error", err, "stack", log.Stack(1))
if theErr, ok := err.(error); ok {
resultChan <- &tsdb.QueryResult{
RefId: query.RefId,
Error: theErr,
}
}
}
}()
queryRes, err := e.executeQuery(ectx, query, queryContext)
if ae, ok := err.(awserr.Error); ok && ae.Code() == "500" {
return err
@@ -146,6 +158,17 @@ func (e *CloudWatchExecutor) executeTimeSeriesQuery(ctx context.Context, queryCo
for region, getMetricDataQuery := range getMetricDataQueries {
q := getMetricDataQuery
eg.Go(func() error {
defer func() {
if err := recover(); err != nil {
plog.Error("Execute Get Metric Data Query Panic", "error", err, "stack", log.Stack(1))
if theErr, ok := err.(error); ok {
resultChan <- &tsdb.QueryResult{
Error: theErr,
}
}
}
}()
queryResponses, err := e.executeGetMetricDataQuery(ectx, region, q, queryContext)
if ae, ok := err.(awserr.Error); ok && ae.Code() == "500" {
return err
@@ -188,8 +211,8 @@ func (e *CloudWatchExecutor) executeQuery(ctx context.Context, query *CloudWatch
return nil, err
}
if endTime.Before(startTime) {
return nil, fmt.Errorf("Invalid time range: End time can't be before start time")
if !startTime.Before(endTime) {
return nil, fmt.Errorf("Invalid time range: Start time must be before end time")
}
params := &cloudwatch.GetMetricStatisticsInput{

View File

@@ -1,9 +1,13 @@
package cloudwatch
import (
"context"
"testing"
"time"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/grafana/grafana/pkg/components/null"
@@ -14,6 +18,24 @@ import (
func TestCloudWatch(t *testing.T) {
Convey("CloudWatch", t, func() {
Convey("executeQuery", func() {
e := &CloudWatchExecutor{
DataSource: &models.DataSource{
JsonData: simplejson.New(),
},
}
Convey("End time before start time should result in error", func() {
_, err := e.executeQuery(context.Background(), &CloudWatchQuery{}, &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange("now-1h", "now-2h")})
So(err.Error(), ShouldEqual, "Invalid time range: Start time must be before end time")
})
Convey("End time equals start time should result in error", func() {
_, err := e.executeQuery(context.Background(), &CloudWatchQuery{}, &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange("now-1h", "now-1h")})
So(err.Error(), ShouldEqual, "Invalid time range: Start time must be before end time")
})
})
Convey("can parse cloudwatch json model", func() {
json := `
{

View File

@@ -47,6 +47,7 @@ func init() {
"AWS/CloudFront": {"Requests", "BytesDownloaded", "BytesUploaded", "TotalErrorRate", "4xxErrorRate", "5xxErrorRate"},
"AWS/CloudSearch": {"SuccessfulRequests", "SearchableDocuments", "IndexUtilization", "Partitions"},
"AWS/CloudHSM": {"HsmUnhealthy", "HsmTemperature", "HsmKeysSessionOccupied", "HsmKeysTokenOccupied", "HsmSslCtxsOccupied", "HsmSessionCount", "HsmUsersAvailable", "HsmUsersMax", "InterfaceEth2OctetsInput", "InterfaceEth2OctetsOutput"},
"AWS/CodeBuild": {"BuildDuration", "Builds", "DownloadSourceDuration", "Duration", "FailedBuilds", "FinalizingDuration", "InstallDuration", "PostBuildDuration", "PreBuildDuration", "ProvisioningDuration", "QueuedDuration", "SubmittedDuration", "SucceededBuilds", "UploadArtifactsDuration"},
"AWS/Connect": {"CallsBreachingConcurrencyQuota", "CallBackNotDialableNumber", "CallRecordingUploadError", "CallsPerInterval", "ConcurrentCalls", "ConcurrentCallsPercentage", "ContactFlowErrors", "ContactFlowFatalErrors", "LongestQueueWaitTime", "MissedCalls", "MisconfiguredPhoneNumbers", "PublicSigningKeyUsage", "QueueCapacityExceededError", "QueueSize", "ThrottledCalls", "ToInstancePacketLossRate"},
"AWS/DMS": {"FreeableMemory", "WriteIOPS", "ReadIOPS", "WriteThroughput", "ReadThroughput", "WriteLatency", "ReadLatency", "SwapUsage", "NetworkTransmitThroughput", "NetworkReceiveThroughput", "FullLoadThroughputBandwidthSource", "FullLoadThroughputBandwidthTarget", "FullLoadThroughputRowsSource", "FullLoadThroughputRowsTarget", "CDCIncomingChanges", "CDCChangesMemorySource", "CDCChangesMemoryTarget", "CDCChangesDiskSource", "CDCChangesDiskTarget", "CDCThroughputBandwidthTarget", "CDCThroughputRowsSource", "CDCThroughputRowsTarget", "CDCLatencySource", "CDCLatencyTarget"},
"AWS/DX": {"ConnectionState", "ConnectionBpsEgress", "ConnectionBpsIngress", "ConnectionPpsEgress", "ConnectionPpsIngress", "ConnectionCRCErrorCount", "ConnectionLightLevelTx", "ConnectionLightLevelRx"},
@@ -100,7 +101,7 @@ func init() {
"AWS/RDS": {"ActiveTransactions", "AuroraBinlogReplicaLag", "AuroraReplicaLag", "AuroraReplicaLagMaximum", "AuroraReplicaLagMinimum", "BinLogDiskUsage", "BlockedTransactions", "BufferCacheHitRatio", "BurstBalance", "CommitLatency", "CommitThroughput", "BinLogDiskUsage", "CPUCreditBalance", "CPUCreditUsage", "CPUUtilization", "DatabaseConnections", "DDLLatency", "DDLThroughput", "Deadlocks", "DeleteLatency", "DeleteThroughput", "DiskQueueDepth", "DMLLatency", "DMLThroughput", "EngineUptime", "FailedSqlStatements", "FreeableMemory", "FreeLocalStorage", "FreeStorageSpace", "InsertLatency", "InsertThroughput", "LoginFailures", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "NetworkThroughput", "Queries", "ReadIOPS", "ReadLatency", "ReadThroughput", "ReplicaLag", "ResultSetCacheHitRatio", "SelectLatency", "SelectThroughput", "SwapUsage", "TotalConnections", "UpdateLatency", "UpdateThroughput", "VolumeBytesUsed", "VolumeReadIOPS", "VolumeWriteIOPS", "WriteIOPS", "WriteLatency", "WriteThroughput"},
"AWS/Route53": {"ChildHealthCheckHealthyCount", "HealthCheckStatus", "HealthCheckPercentageHealthy", "ConnectionTime", "SSLHandshakeTime", "TimeToFirstByte"},
"AWS/S3": {"BucketSizeBytes", "NumberOfObjects", "AllRequests", "GetRequests", "PutRequests", "DeleteRequests", "HeadRequests", "PostRequests", "ListRequests", "BytesDownloaded", "BytesUploaded", "4xxErrors", "5xxErrors", "FirstByteLatency", "TotalRequestLatency"},
"AWS/SES": {"Bounce", "Complaint", "Delivery", "Reject", "Send"},
"AWS/SES": {"Bounce", "Complaint", "Delivery", "Reject", "Send", "Reputation.BounceRate", "Reputation.ComplaintRate"},
"AWS/SNS": {"NumberOfMessagesPublished", "PublishSize", "NumberOfNotificationsDelivered", "NumberOfNotificationsFailed"},
"AWS/SQS": {"NumberOfMessagesSent", "SentMessageSize", "NumberOfMessagesReceived", "NumberOfEmptyReceives", "NumberOfMessagesDeleted", "ApproximateAgeOfOldestMessage", "ApproximateNumberOfMessagesDelayed", "ApproximateNumberOfMessagesVisible", "ApproximateNumberOfMessagesNotVisible"},
"AWS/States": {"ExecutionTime", "ExecutionThrottled", "ExecutionsAborted", "ExecutionsFailed", "ExecutionsStarted", "ExecutionsSucceeded", "ExecutionsTimedOut", "ActivityRunTime", "ActivityScheduleTime", "ActivityTime", "ActivitiesFailed", "ActivitiesHeartbeatTimedOut", "ActivitiesScheduled", "ActivitiesScheduled", "ActivitiesSucceeded", "ActivitiesTimedOut", "LambdaFunctionRunTime", "LambdaFunctionScheduleTime", "LambdaFunctionTime", "LambdaFunctionsFailed", "LambdaFunctionsHeartbeatTimedOut", "LambdaFunctionsScheduled", "LambdaFunctionsStarted", "LambdaFunctionsSucceeded", "LambdaFunctionsTimedOut"},
@@ -123,6 +124,7 @@ func init() {
"AWS/CloudFront": {"DistributionId", "Region"},
"AWS/CloudSearch": {},
"AWS/CloudHSM": {"Region", "ClusterId", "HsmId"},
"AWS/CodeBuild": {"ProjectName"},
"AWS/Connect": {"InstanceId", "MetricGroup", "Participant", "QueueName", "Stream Type", "Type of Connection"},
"AWS/DMS": {"ReplicationInstanceIdentifier", "ReplicationTaskIdentifier"},
"AWS/DX": {"ConnectionId"},

View File

@@ -65,7 +65,7 @@ var NewClient = func(ctx context.Context, ds *models.DataSource, timeRange *tsdb
clientLog.Debug("Creating new client", "version", version, "timeField", timeField, "indices", strings.Join(indices, ", "))
switch version {
case 2, 5, 56:
case 2, 5, 56, 60:
return &baseClientImpl{
ctx: ctx,
ds: ds,

View File

@@ -90,6 +90,19 @@ func TestClient(t *testing.T) {
So(err, ShouldBeNil)
So(c.GetVersion(), ShouldEqual, 56)
})
Convey("When version 60 should return v6.0 client", func() {
ds := &models.DataSource{
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 60,
"timeField": "@timestamp",
}),
}
c, err := NewClient(context.Background(), ds, nil)
So(err, ShouldBeNil)
So(c.GetVersion(), ShouldEqual, 60)
})
})
Convey("Given a fake http client", func() {
@@ -153,8 +166,6 @@ func TestClient(t *testing.T) {
jBody, err := simplejson.NewJson(bodyBytes)
So(err, ShouldBeNil)
fmt.Println("body", string(headerBytes))
So(jHeader.Get("index").MustString(), ShouldEqual, "metrics-2018.05.15")
So(jHeader.Get("ignore_unavailable").MustBool(false), ShouldEqual, true)
So(jHeader.Get("search_type").MustString(), ShouldEqual, "count")
@@ -209,8 +220,6 @@ func TestClient(t *testing.T) {
jBody, err := simplejson.NewJson(bodyBytes)
So(err, ShouldBeNil)
fmt.Println("body", string(headerBytes))
So(jHeader.Get("index").MustString(), ShouldEqual, "metrics-2018.05.15")
So(jHeader.Get("ignore_unavailable").MustBool(false), ShouldEqual, true)
So(jHeader.Get("search_type").MustString(), ShouldEqual, "query_then_fetch")
@@ -265,8 +274,6 @@ func TestClient(t *testing.T) {
jBody, err := simplejson.NewJson(bodyBytes)
So(err, ShouldBeNil)
fmt.Println("body", string(headerBytes))
So(jHeader.Get("index").MustString(), ShouldEqual, "metrics-2018.05.15")
So(jHeader.Get("ignore_unavailable").MustBool(false), ShouldEqual, true)
So(jHeader.Get("search_type").MustString(), ShouldEqual, "query_then_fetch")

View File

@@ -32,6 +32,7 @@ func init() {
renders["median"] = QueryDefinition{Renderer: functionRenderer}
renders["sum"] = QueryDefinition{Renderer: functionRenderer}
renders["mode"] = QueryDefinition{Renderer: functionRenderer}
renders["cumulative_sum"] = QueryDefinition{Renderer: functionRenderer}
renders["holt_winters"] = QueryDefinition{
Renderer: functionRenderer,

View File

@@ -23,6 +23,7 @@ func TestInfluxdbQueryPart(t *testing.T) {
{mode: "alias", params: []string{"test"}, input: "mean(value)", expected: `mean(value) AS "test"`},
{mode: "count", params: []string{}, input: "distinct(value)", expected: `count(distinct(value))`},
{mode: "mode", params: []string{}, input: "value", expected: `mode(value)`},
{mode: "cumulative_sum", params: []string{}, input: "mean(value)", expected: `cumulative_sum(mean(value))`},
}
queryContext := &tsdb.TsdbQuery{TimeRange: tsdb.NewTimeRange("5m", "now")}

View File

@@ -66,6 +66,10 @@ func (m *msSqlMacroEngine) evaluateMacro(name string, args []string) (string, er
}
return fmt.Sprintf("%s BETWEEN '%s' AND '%s'", args[0], m.timeRange.GetFromAsTimeUTC().Format(time.RFC3339), m.timeRange.GetToAsTimeUTC().Format(time.RFC3339)), nil
case "__timeFrom":
return fmt.Sprintf("'%s'", m.timeRange.GetFromAsTimeUTC().Format(time.RFC3339)), nil
case "__timeTo":
return fmt.Sprintf("'%s'", m.timeRange.GetToAsTimeUTC().Format(time.RFC3339)), nil
case "__timeGroup":
if len(args) < 2 {
return "", fmt.Errorf("macro %v needs time column and interval", name)

View File

@@ -52,6 +52,20 @@ func TestMacroEngine(t *testing.T) {
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column BETWEEN '%s' AND '%s'", from.Format(time.RFC3339), to.Format(time.RFC3339)))
})
Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "select '2018-04-12T18:00:00Z'")
})
Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "select '2018-04-12T18:05:00Z'")
})
Convey("interpolate __timeGroup function", func() {
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
So(err, ShouldBeNil)

View File

@@ -61,6 +61,10 @@ func (m *mySqlMacroEngine) evaluateMacro(name string, args []string) (string, er
}
return fmt.Sprintf("%s BETWEEN FROM_UNIXTIME(%d) AND FROM_UNIXTIME(%d)", args[0], m.timeRange.GetFromAsSecondsEpoch(), m.timeRange.GetToAsSecondsEpoch()), nil
case "__timeFrom":
return fmt.Sprintf("FROM_UNIXTIME(%d)", m.timeRange.GetFromAsSecondsEpoch()), nil
case "__timeTo":
return fmt.Sprintf("FROM_UNIXTIME(%d)", m.timeRange.GetToAsSecondsEpoch()), nil
case "__timeGroup":
if len(args) < 2 {
return "", fmt.Errorf("macro %v needs time column and interval", name)

View File

@@ -63,6 +63,20 @@ func TestMacroEngine(t *testing.T) {
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column BETWEEN FROM_UNIXTIME(%d) AND FROM_UNIXTIME(%d)", from.Unix(), to.Unix()))
})
Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", from.Unix()))
})
Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select FROM_UNIXTIME(%d)", to.Unix()))
})
Convey("interpolate __unixEpochFilter function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(time)")
So(err, ShouldBeNil)

View File

@@ -761,7 +761,7 @@ func TestMySQL(t *testing.T) {
{
DataSource: &models.DataSource{JsonData: simplejson.New()},
Model: simplejson.NewFromAny(map[string]interface{}{
"rawSql": `SELECT time FROM metric_values WHERE time > $__timeFrom() OR time < $__timeFrom() OR 1 < $__unixEpochFrom() OR $__unixEpochTo() > 1 ORDER BY 1`,
"rawSql": `SELECT time FROM metric_values WHERE time > $__timeFrom() OR time < $__timeTo() OR 1 < $__unixEpochFrom() OR $__unixEpochTo() > 1 ORDER BY 1`,
"format": "time_series",
}),
RefId: "A",
@@ -773,7 +773,7 @@ func TestMySQL(t *testing.T) {
So(err, ShouldBeNil)
queryResult := resp.Results["A"]
So(queryResult.Error, ShouldBeNil)
So(queryResult.Meta.Get("sql").MustString(), ShouldEqual, "SELECT time FROM metric_values WHERE time > '2018-03-15T12:55:00Z' OR time < '2018-03-15T12:55:00Z' OR 1 < 1521118500 OR 1521118800 > 1 ORDER BY 1")
So(queryResult.Meta.Get("sql").MustString(), ShouldEqual, "SELECT time FROM metric_values WHERE time > FROM_UNIXTIME(1521118500) OR time < FROM_UNIXTIME(1521118800) OR 1 < 1521118500 OR 1521118800 > 1 ORDER BY 1")
})

View File

@@ -87,6 +87,10 @@ func (m *postgresMacroEngine) evaluateMacro(name string, args []string) (string,
}
return fmt.Sprintf("%s BETWEEN '%s' AND '%s'", args[0], m.timeRange.GetFromAsTimeUTC().Format(time.RFC3339), m.timeRange.GetToAsTimeUTC().Format(time.RFC3339)), nil
case "__timeFrom":
return fmt.Sprintf("'%s'", m.timeRange.GetFromAsTimeUTC().Format(time.RFC3339)), nil
case "__timeTo":
return fmt.Sprintf("'%s'", m.timeRange.GetToAsTimeUTC().Format(time.RFC3339)), nil
case "__timeGroup":
if len(args) < 2 {
return "", fmt.Errorf("macro %v needs time column and interval and optional fill value", name)


@@ -44,6 +44,20 @@ func TestMacroEngine(t *testing.T) {
So(sql, ShouldEqual, fmt.Sprintf("WHERE time_column BETWEEN '%s' AND '%s'", from.Format(time.RFC3339), to.Format(time.RFC3339)))
})
Convey("interpolate __timeFrom function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "select '2018-04-12T18:00:00Z'")
})
Convey("interpolate __timeTo function", func() {
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, "select '2018-04-12T18:05:00Z'")
})
Convey("interpolate __timeGroup function pre 5.3 compatibility", func() {
sql, err := engine.Interpolate(query, timeRange, "SELECT $__timeGroup(time_column,'5m'), value")


@@ -196,8 +196,6 @@ var Interpolate = func(query *Query, timeRange *TimeRange, sql string) (string,
sql = strings.Replace(sql, "$__interval_ms", strconv.FormatInt(interval.Milliseconds(), 10), -1)
sql = strings.Replace(sql, "$__interval", interval.Text, -1)
sql = strings.Replace(sql, "$__timeFrom()", fmt.Sprintf("'%s'", timeRange.GetFromAsTimeUTC().Format(time.RFC3339)), -1)
sql = strings.Replace(sql, "$__timeTo()", fmt.Sprintf("'%s'", timeRange.GetToAsTimeUTC().Format(time.RFC3339)), -1)
sql = strings.Replace(sql, "$__unixEpochFrom()", fmt.Sprintf("%d", timeRange.GetFromAsSecondsEpoch()), -1)
sql = strings.Replace(sql, "$__unixEpochTo()", fmt.Sprintf("%d", timeRange.GetToAsSecondsEpoch()), -1)


@@ -44,20 +44,6 @@ func TestSqlEngine(t *testing.T) {
So(sql, ShouldEqual, "select 60000 ")
})
Convey("interpolate __timeFrom function", func() {
sql, err := Interpolate(query, timeRange, "select $__timeFrom()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select '%s'", from.Format(time.RFC3339)))
})
Convey("interpolate __timeTo function", func() {
sql, err := Interpolate(query, timeRange, "select $__timeTo()")
So(err, ShouldBeNil)
So(sql, ShouldEqual, fmt.Sprintf("select '%s'", to.Format(time.RFC3339)))
})
Convey("interpolate __unixEpochFrom function", func() {
sql, err := Interpolate(query, timeRange, "select $__unixEpochFrom()")
So(err, ShouldBeNil)


@@ -16,7 +16,7 @@ export function registerAngularDirectives() {
react2AngularDirective('searchResult', SearchResult, []);
react2AngularDirective('tagFilter', TagFilter, [
'tags',
['onSelect', { watchDepth: 'reference' }],
['onChange', { watchDepth: 'reference' }],
['tagOptions', { watchDepth: 'reference' }],
]);
}


@@ -84,7 +84,7 @@ class AddPermissions extends Component<Props, NewDashboardAclItem> {
render() {
const { onCancel } = this.props;
const newItem = this.state;
const pickerClassName = 'width-20';
const pickerClassName = 'min-width-20';
const isValid = this.isValid();
return (
<div className="gf-form-inline cta-form">


@@ -40,7 +40,7 @@ export class UserPicker extends Component<Props, State> {
.then(result => {
return result.map(user => ({
id: user.userId,
label: `${user.login} - ${user.email}`,
label: user.login === user.email ? user.login : `${user.login} - ${user.email}`,
avatarUrl: user.avatarUrl,
login: user.login,
}));


@@ -10,7 +10,7 @@ import ResetStyles from 'app/core/components/Picker/ResetStyles';
export interface Props {
tags: string[];
tagOptions: () => any;
onSelect: (tag: string) => void;
onChange: (tags: string[]) => void;
}
export class TagFilter extends React.Component<Props, any> {
@@ -18,12 +18,9 @@ export class TagFilter extends React.Component<Props, any> {
constructor(props) {
super(props);
this.searchTags = this.searchTags.bind(this);
this.onChange = this.onChange.bind(this);
}
searchTags(query) {
onLoadOptions = query => {
return this.props.tagOptions().then(options => {
return options.map(option => ({
value: option.term,
@@ -31,18 +28,20 @@ export class TagFilter extends React.Component<Props, any> {
count: option.count,
}));
});
}
};
onChange(newTags) {
this.props.onSelect(newTags);
}
onChange = (newTags: any[]) => {
this.props.onChange(newTags.map(tag => tag.value));
};
render() {
const tags = this.props.tags.map(tag => ({ value: tag, label: tag, count: 0 }));
const selectOptions = {
classNamePrefix: 'gf-form-select-box',
isMulti: true,
defaultOptions: true,
loadOptions: this.searchTags,
loadOptions: this.onLoadOptions,
onChange: this.onChange,
className: 'gf-form-input gf-form-input--form-dropdown',
placeholder: 'Tags',
@@ -50,8 +49,12 @@ export class TagFilter extends React.Component<Props, any> {
noOptionsMessage: () => 'No tags found',
getOptionValue: i => i.value,
getOptionLabel: i => i.label,
value: this.props.tags,
value: tags,
styles: ResetStyles,
filterOption: (option, searchQuery) => {
const regex = RegExp(searchQuery, 'i');
return regex.test(option.value);
},
components: {
Option: TagOption,
IndicatorsContainer,
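The filterOption added above is what makes the tag select box filter on text input; a minimal standalone sketch of that matching logic (the option shape is assumed for illustration):

interface TagOptionItem {
  value: string;
  label: string;
  count: number;
}

// Mirrors the filterOption above: build a case-insensitive regex from the
// search text and test it against the tag value, so typing "pro" matches
// both "prod" and "PROD".
const filterOption = (option: TagOptionItem, searchQuery: string): boolean => {
  const regex = RegExp(searchQuery, 'i');
  return regex.test(option.value);
};

console.log(filterOption({ value: 'PROD', label: 'PROD', count: 3 }, 'pro')); // true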


@@ -44,7 +44,7 @@ export class SeriesColorPicker extends React.Component<SeriesColorPickerProps> {
const drop = new Drop({
target: this.pickerElem,
content: dropContentElem,
position: 'top center',
position: 'bottom center',
classes: 'drop-popover',
openOn: 'hover',
hoverCloseDelay: 200,


@@ -41,7 +41,7 @@
</a>
</div>
<tag-filter tags="ctrl.query.tag" tagOptions="ctrl.getTags" onSelect="ctrl.onTagSelect">
<tag-filter tags="ctrl.query.tag" tagOptions="ctrl.getTags" onChange="ctrl.onTagFiltersChanged">
</tag-filter>
</div>


@@ -25,8 +25,6 @@ export class SearchCtrl {
appEvents.on('hide-dash-search', this.closeSearch.bind(this), $scope);
this.initialFolderFilterTitle = 'All';
this.getTags = this.getTags.bind(this);
this.onTagSelect = this.onTagSelect.bind(this);
this.isEditor = contextSrv.isEditor;
this.hasEditPermissionInFolders = contextSrv.hasEditPermissionInFolders;
}
@@ -162,7 +160,7 @@ export class SearchCtrl {
const localSearchId = this.currentSearchId;
const query = {
...this.query,
tag: this.query.tag.map(i => i.value),
tag: this.query.tag,
};
return this.searchSrv.search(query).then(results => {
@@ -195,14 +193,14 @@ export class SearchCtrl {
evt.preventDefault();
}
getTags() {
getTags = () => {
return this.searchSrv.getDashboardTags();
}
};
onTagSelect(newTags) {
this.query.tag = newTags;
onTagFiltersChanged = (tags: string[]) => {
this.query.tag = tags;
this.search();
}
};
clearSearchFilter() {
this.query.tag = [];


@@ -428,10 +428,16 @@ kbn.valueFormats.hex0x = (value, decimals) => {
};
kbn.valueFormats.sci = (value, decimals) => {
if (value == null) {
return '';
}
return value.toExponential(decimals);
};
kbn.valueFormats.locale = (value, decimals) => {
if (value == null) {
return '';
}
return value.toLocaleString(undefined, { maximumFractionDigits: decimals });
};
@@ -584,8 +590,8 @@ kbn.valueFormats.flowcms = kbn.formatBuilders.fixedUnit('cms');
kbn.valueFormats.flowcfs = kbn.formatBuilders.fixedUnit('cfs');
kbn.valueFormats.flowcfm = kbn.formatBuilders.fixedUnit('cfm');
kbn.valueFormats.litreh = kbn.formatBuilders.fixedUnit('l/h');
kbn.valueFormats.flowlpm = kbn.formatBuilders.decimalSIPrefix('L');
kbn.valueFormats.flowmlpm = kbn.formatBuilders.decimalSIPrefix('L', -1);
kbn.valueFormats.flowlpm = kbn.formatBuilders.fixedUnit('l/min');
kbn.valueFormats.flowmlpm = kbn.formatBuilders.fixedUnit('mL/min');
// Angle
kbn.valueFormats.degree = kbn.formatBuilders.fixedUnit('°');
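The null guards added to sci and locale above make empty datapoints render as empty strings, and the flow units move from SI-prefix scaling to fixed suffixes. A minimal sketch of a fixed-unit builder in that spirit (the real kbn builders differ in details such as decimal rounding):

type ValueFormat = (value: number | null, decimals?: number) => string;

// Append a fixed unit suffix; like the guards added above, return an
// empty string for null instead of throwing.
const fixedUnit = (unit: string): ValueFormat => (value, decimals) => {
  if (value == null) {
    return '';
  }
  return `${value.toFixed(decimals == null ? 0 : decimals)} ${unit}`;
};

const flowlpm = fixedUnit('l/min');
console.log(flowlpm(12.345, 1)); // "12.3 l/min"
console.log(flowlpm(null)); // ""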


@@ -64,9 +64,9 @@
</div>
<div class="gf-form">
<metric-segment-model property="conditionModel.evaluator.type" options="ctrl.evalFunctions" custom="false" css-class="query-keyword" on-change="ctrl.evaluatorTypeChanged(conditionModel.evaluator)"></metric-segment-model>
<input class="gf-form-input max-width-9" type="number" step="any" ng-hide="conditionModel.evaluator.params.length === 0" ng-model="conditionModel.evaluator.params[0]" ng-change="ctrl.evaluatorParamsChanged()"></input>
<label class="gf-form-label query-keyword" ng-show="conditionModel.evaluator.params.length === 2">TO</label>
<input class="gf-form-input max-width-9" type="number" step="any" ng-if="conditionModel.evaluator.params.length === 2" ng-model="conditionModel.evaluator.params[1]" ng-change="ctrl.evaluatorParamsChanged()"></input>
<input class="gf-form-input max-width-9" type="number" step="any" ng-hide="conditionModel.evaluator.params.length === 0" ng-model="conditionModel.evaluator.params[0]" ng-change="ctrl.evaluatorParamsChanged()">
<label class="gf-form-label query-keyword" ng-show="conditionModel.evaluator.params.length === 2">TO</label>
<input class="gf-form-input max-width-9" type="number" step="any" ng-if="conditionModel.evaluator.params.length === 2" ng-model="conditionModel.evaluator.params[1]" ng-change="ctrl.evaluatorParamsChanged()">
</div>
<div class="gf-form">
<label class="gf-form-label">


@@ -8,9 +8,9 @@ const alertQueryDef = new QueryPartDef({
{
name: 'from',
type: 'string',
options: ['1s', '10s', '1m', '5m', '10m', '15m', '1h', '24h', '48h'],
options: ['10s', '1m', '5m', '10m', '15m', '1h', '24h', '48h'],
},
{ name: 'to', type: 'string', options: ['now'] },
{ name: 'to', type: 'string', options: ['now', 'now-1m', 'now-5m', 'now-10m', 'now-1h'] },
],
defaultParams: ['#A', '15m', 'now', 'avg'],
});


@@ -223,6 +223,8 @@ export class DashboardModel {
}
panelInitialized(panel: PanelModel) {
panel.initialized();
if (!this.otherPanelInFullscreen(panel)) {
panel.refresh();
}


@@ -6,6 +6,7 @@ function dashLinksContainer() {
return {
scope: {
links: '=',
dashboard: '=',
},
restrict: 'E',
controller: 'DashLinksContainerCtrl',
@@ -20,6 +21,8 @@ function dashLink($compile, $sanitize, linkSrv) {
restrict: 'E',
link: (scope, elem) => {
const link = scope.link;
const dashboard = scope.dashboard;
let template =
'<div class="gf-form">' +
'<a class="pointer gf-form-label" data-placement="bottom"' +
@@ -76,7 +79,7 @@ function dashLink($compile, $sanitize, linkSrv) {
}
update();
scope.$on('refresh', update);
dashboard.events.on('refresh', update, scope);
},
};
}


@@ -132,7 +132,7 @@ export class PanelModel {
}
}
panelInitialized() {
initialized() {
this.events.emit('panel-initialized');
}


@@ -20,7 +20,7 @@
</div>
<div ng-if="ctrl.dashboard.links.length > 0" >
<dash-links-container links="ctrl.dashboard.links" class="gf-form-inline"></dash-links-container>
<dash-links-container links="ctrl.dashboard.links" dashboard="ctrl.dashboard" class="gf-form-inline"></dash-links-container>
</div>
<div class="clearfix"></div>


@@ -148,7 +148,7 @@ export function loadDataSourceTypes(): ThunkResult<void> {
export function nameExits(dataSources, name) {
return (
dataSources.filter(dataSource => {
return dataSource.name === name;
return dataSource.name.toLowerCase() === name.toLowerCase();
}).length > 0
);
}
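With the lowercased comparison, duplicate detection ignores casing. A small usage sketch of the function above (data source objects trimmed to the relevant field):

interface DataSourceItem {
  name: string;
}

// Same comparison as nameExits above: lowercase both sides so a new data
// source cannot reuse an existing name with different casing.
function nameExits(dataSources: DataSourceItem[], name: string): boolean {
  return dataSources.filter(ds => ds.name.toLowerCase() === name.toLowerCase()).length > 0;
}

console.log(nameExits([{ name: 'Graphite' }], 'graphite')); // true
console.log(nameExits([{ name: 'Graphite' }], 'Prometheus')); // false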


@@ -1,5 +1,4 @@
<div class="panel panel--solo" ng-if="panel" style="width: 100%">
<div class="panel-solo" ng-if="panel">
<plugin-component type="panel">
</plugin-component>
</div>
<div class="clearfix"></div>


@@ -115,7 +115,7 @@ export class TeamMembers extends PureComponent<Props, State> {
</button>
<h5>Add Team Member</h5>
<div className="gf-form-inline">
<UserPicker onSelected={this.onUserSelected} className="width-30" />
<UserPicker onSelected={this.onUserSelected} className="min-width-30" />
{this.state.newTeamMember && (
<button className="btn btn-success gf-form-btn" type="submit" onClick={this.onAddUserToTeam}>
Add to team


@@ -58,7 +58,7 @@ exports[`Render should render component 1`] = `
className="gf-form-inline"
>
<UserPicker
className="width-30"
className="min-width-30"
onSelected={[Function]}
/>
</div>
@@ -152,7 +152,7 @@ exports[`Render should render team members 1`] = `
className="gf-form-inline"
>
<UserPicker
className="width-30"
className="min-width-30"
onSelected={[Function]}
/>
</div>
@@ -372,7 +372,7 @@ exports[`Render should render team members when sync enabled 1`] = `
className="gf-form-inline"
>
<UserPicker
className="width-30"
className="min-width-30"
onSelected={[Function]}
/>
</div>


@@ -2,22 +2,8 @@ import coreModule from 'app/core/core_module';
import _ from 'lodash';
import * as queryDef from './query_def';
export function elasticBucketAgg() {
return {
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/bucket_agg.html',
controller: 'ElasticBucketAggCtrl',
restrict: 'E',
scope: {
target: '=',
index: '=',
onChange: '&',
getFields: '&',
},
};
}
export class ElasticBucketAggCtrl {
/** @nginject */
/** @ngInject */
constructor($scope, uiSegmentSrv, $q, $rootScope) {
const bucketAggs = $scope.target.bucketAggs;
@@ -226,5 +212,18 @@ export class ElasticBucketAggCtrl {
}
}
export function elasticBucketAgg() {
return {
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/bucket_agg.html',
controller: ElasticBucketAggCtrl,
restrict: 'E',
scope: {
target: '=',
index: '=',
onChange: '&',
getFields: '&',
},
};
}
coreModule.directive('elasticBucketAgg', elasticBucketAgg);
coreModule.controller('ElasticBucketAggCtrl', ElasticBucketAggCtrl);
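Besides moving the directive factory below the controller class, the hunk fixes the misspelled @nginject annotation. The exact token matters: build-time annotation tooling (ng-annotate style) only rewrites constructors marked @ngInject into an explicit $inject list, which is what keeps Angular dependency injection working after minification. A conceptual sketch of that expansion (hypothetical controller, assuming ng-annotate-style processing):

class ExampleCtrl {
  /** @ngInject */
  constructor($scope: any, uiSegmentSrv: any) {}
}

// What the annotation conceptually expands to: dependency names listed as
// strings, which a minifier cannot mangle.
(ExampleCtrl as any).$inject = ['$scope', 'uiSegmentSrv'];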


@@ -2,22 +2,8 @@ import coreModule from 'app/core/core_module';
import _ from 'lodash';
import * as queryDef from './query_def';
export function elasticMetricAgg() {
return {
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/metric_agg.html',
controller: 'ElasticMetricAggCtrl',
restrict: 'E',
scope: {
target: '=',
index: '=',
onChange: '&',
getFields: '&',
esVersion: '=',
},
};
}
export class ElasticMetricAggCtrl {
/** @ngInject */
constructor($scope, uiSegmentSrv, $q, $rootScope) {
const metricAggs = $scope.target.metrics;
$scope.metricAggTypes = queryDef.getMetricAggTypes($scope.esVersion);
@@ -209,5 +195,19 @@ export class ElasticMetricAggCtrl {
}
}
export function elasticMetricAgg() {
return {
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/metric_agg.html',
controller: ElasticMetricAggCtrl,
restrict: 'E',
scope: {
target: '=',
index: '=',
onChange: '&',
getFields: '&',
esVersion: '=',
},
};
}
coreModule.directive('elasticMetricAgg', elasticMetricAgg);
coreModule.controller('ElasticMetricAggCtrl', ElasticMetricAggCtrl);


@@ -28,12 +28,12 @@ An annotation is an event that is overlaid on top of graphs. The query can have
Macros:
- $__time(column) -> UNIX_TIMESTAMP(column) as time (or as time_sec)
- $__timeEpoch(column) -> UNIX_TIMESTAMP(column) as time (or as time_sec)
- $__timeFilter(column) -> column BETWEEN '2017-04-21T05:01:17Z' AND '2017-04-21T05:01:17Z'
- $__timeFilter(column) -> column BETWEEN FROM_UNIXTIME(1492750877) AND FROM_UNIXTIME(1492750877)
- $__unixEpochFilter(column) -> time_unix_epoch > 1492750877 AND time_unix_epoch < 1492750877
Or build your own conditionals using these macros which just return the values:
- $__timeFrom() -> '2017-04-21T05:01:17Z'
- $__timeTo() -> '2017-04-21T05:01:17Z'
- $__timeFrom() -> FROM_UNIXTIME(1492750877)
- $__timeTo() -> FROM_UNIXTIME(1492750877)
- $__unixEpochFrom() -> 1492750877
- $__unixEpochTo() -> 1492750877
</pre>
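For example, an annotation query built on these macros might look as follows before and after interpolation (table and column names are made up; the epoch values match the help text's example range):

// Hypothetical annotation query as typed in the editor:
const rawSql = `SELECT UNIX_TIMESTAMP(created) as time_sec, title as text
FROM events
WHERE $__timeFilter(created)`;

// What the MySQL datasource sends after macro interpolation:
const interpolated = `SELECT UNIX_TIMESTAMP(created) as time_sec, title as text
FROM events
WHERE created BETWEEN FROM_UNIXTIME(1492750877) AND FROM_UNIXTIME(1492750877)`;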


@@ -151,7 +151,7 @@ Table:
Macros:
- $__time(column) -> UNIX_TIMESTAMP(column) as time_sec
- $__timeEpoch(column) -> UNIX_TIMESTAMP(column) as time_sec
- $__timeFilter(column) -> column BETWEEN '2017-04-21T05:01:17Z' AND '2017-04-21T05:01:17Z'
- $__timeFilter(column) -> column BETWEEN FROM_UNIXTIME(1492750877) AND FROM_UNIXTIME(1492750877)
- $__unixEpochFilter(column) -> time_unix_epoch > 1492750877 AND time_unix_epoch < 1492750877
- $__timeGroup(column,'5m'[, fillvalue]) -> cast(cast(UNIX_TIMESTAMP(column)/(300) as signed)*300 as signed)
by setting fillvalue grafana will fill in missing values according to the interval
@@ -169,8 +169,8 @@ GROUP BY 1
ORDER BY 1
Or build your own conditionals using these macros which just return the values:
- $__timeFrom() -> '2017-04-21T05:01:17Z'
- $__timeTo() -> '2017-04-21T05:01:17Z'
- $__timeFrom() -> FROM_UNIXTIME(1492750877)
- $__timeTo() -> FROM_UNIXTIME(1492750877)
- $__unixEpochFrom() -> 1492750877
- $__unixEpochTo() -> 1492750877
</pre>
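Likewise, a time series query grouped with $__timeGroup at a 5 minute interval (again with illustrative table and column names):

// Hypothetical metric query using the grouping macro:
const rawSql = `SELECT $__timeGroup(created, '5m') as time_sec, avg(value) as value
FROM metric_values
WHERE $__timeFilter(created)
GROUP BY 1
ORDER BY 1`;

// The macro expands to the epoch bucketing shown in the help text:
const expanded = `SELECT cast(cast(UNIX_TIMESTAMP(created)/(300) as signed)*300 as signed) as time_sec, avg(value) as value
FROM metric_values
WHERE created BETWEEN FROM_UNIXTIME(1492750877) AND FROM_UNIXTIME(1492750877)
GROUP BY 1
ORDER BY 1`;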


@@ -151,8 +151,7 @@ table_schema IN (
buildDatatypeQuery(column: string) {
let query = 'SELECT udt_name FROM information_schema.columns WHERE ';
query += this.buildSchemaConstraint();
query += ' AND table_name = ' + this.quoteIdentAsLiteral(this.target.table);
query += this.buildTableConstraint(this.target.table);
query += ' AND column_name = ' + this.quoteIdentAsLiteral(column);
return query;
}


@@ -58,15 +58,7 @@ class GraphElement {
// panel events
this.ctrl.events.on('panel-teardown', this.onPanelTeardown.bind(this));
/**
* Split graph rendering into two parts.
* First, calculate series stats in buildFlotPairs() function. Then legend rendering started
* (see ctrl.events.on('render') in legend.ts).
* When legend is rendered it emits 'legend-rendering-complete' and graph rendered.
*/
this.ctrl.events.on('render', this.onRender.bind(this));
this.ctrl.events.on('legend-rendering-complete', this.onLegendRenderingComplete.bind(this));
// global events
appEvents.on('graph-hover', this.onGraphHover.bind(this), scope);
@@ -85,11 +77,20 @@ class GraphElement {
if (!this.data) {
return;
}
this.annotations = this.ctrl.annotations || [];
this.buildFlotPairs(this.data);
const graphHeight = this.elem.height();
updateLegendValues(this.data, this.panel, graphHeight);
if (!this.panel.legend.show) {
if (this.legendElem.hasChildNodes()) {
ReactDOM.unmountComponentAtNode(this.legendElem);
}
this.renderPanel();
return;
}
const { values, min, max, avg, current, total } = this.panel.legend;
const { alignAsTable, rightSide, sideWidth, sort, sortDesc, hideEmpty, hideZero } = this.panel.legend;
const legendOptions = { alignAsTable, rightSide, sideWidth, sort, sortDesc, hideEmpty, hideZero };
@@ -104,12 +105,9 @@ class GraphElement {
onColorChange: this.ctrl.onColorChange,
onToggleAxis: this.ctrl.onToggleAxis,
};
const legendReactElem = React.createElement(Legend, legendProps);
ReactDOM.render(legendReactElem, this.legendElem, () => this.onLegendRenderingComplete());
}
onLegendRenderingComplete() {
this.render_panel();
const legendReactElem = React.createElement(Legend, legendProps);
ReactDOM.render(legendReactElem, this.legendElem, () => this.renderPanel());
}
onGraphHover(evt) {
@@ -281,7 +279,7 @@ class GraphElement {
}
// Function for rendering panel
render_panel() {
renderPanel() {
this.panelWidth = this.elem.width();
if (this.shouldAbortRender()) {
return;


@@ -125,7 +125,7 @@ describe('grafanaGraph', () => {
//Emulate functions called by event listeners
link.buildFlotPairs(link.data);
link.render_panel();
link.renderPanel();
ctx.plotData = ctrl.plot.mock.calls[0][1];
ctx.plotOptions = ctrl.plot.mock.calls[0][2];


@@ -130,6 +130,33 @@ describe('TimeRegionManager', () => {
});
});
plotOptionsScenario('for time from/to region', ctx => {
const regions = [{ from: '00:00', to: '05:00', fill: true, colorMode: 'red' }];
const from = moment('2018-12-01T00:00+01:00');
const to = moment('2018-12-03T23:59+01:00');
ctx.setup(regions, from, to);
it('should add 3 markings', () => {
expect(ctx.options.grid.markings.length).toBe(3);
});
it('should add one fill between 00:00 and 05:00 each day', () => {
const markings = ctx.options.grid.markings;
expect(moment(markings[0].xaxis.from).format()).toBe(moment('2018-12-01T01:00:00+01:00').format());
expect(moment(markings[0].xaxis.to).format()).toBe(moment('2018-12-01T06:00:00+01:00').format());
expect(markings[0].color).toBe(colorModes.red.color.fill);
expect(moment(markings[1].xaxis.from).format()).toBe(moment('2018-12-02T01:00:00+01:00').format());
expect(moment(markings[1].xaxis.to).format()).toBe(moment('2018-12-02T06:00:00+01:00').format());
expect(markings[1].color).toBe(colorModes.red.color.fill);
expect(moment(markings[2].xaxis.from).format()).toBe(moment('2018-12-03T01:00:00+01:00').format());
expect(moment(markings[2].xaxis.to).format()).toBe(moment('2018-12-03T06:00:00+01:00').format());
expect(markings[2].color).toBe(colorModes.red.color.fill);
});
});
plotOptionsScenario('for day of week from/to region', ctx => {
const regions = [{ fromDayOfWeek: 7, toDayOfWeek: 7, fill: true, colorMode: 'red' }];
const from = moment('2018-01-01T18:45:05+01:00');
@@ -211,6 +238,42 @@ describe('TimeRegionManager', () => {
});
});
plotOptionsScenario('for day of week from/to time region', ctx => {
const regions = [{ fromDayOfWeek: 7, from: '23:00', toDayOfWeek: 1, to: '01:40', fill: true, colorMode: 'red' }];
const from = moment('2018-12-07T12:51:19+01:00');
const to = moment('2018-12-10T13:51:29+01:00');
ctx.setup(regions, from, to);
it('should add 1 marking', () => {
expect(ctx.options.grid.markings.length).toBe(1);
});
it('should add one fill between sunday 23:00 and monday 01:40', () => {
const markings = ctx.options.grid.markings;
expect(moment(markings[0].xaxis.from).format()).toBe(moment('2018-12-10T00:00:00+01:00').format());
expect(moment(markings[0].xaxis.to).format()).toBe(moment('2018-12-10T02:40:00+01:00').format());
});
});
plotOptionsScenario('for day of week from/to time region', ctx => {
const regions = [{ fromDayOfWeek: 6, from: '03:00', toDayOfWeek: 7, to: '02:00', fill: true, colorMode: 'red' }];
const from = moment('2018-12-07T12:51:19+01:00');
const to = moment('2018-12-10T13:51:29+01:00');
ctx.setup(regions, from, to);
it('should add 1 marking', () => {
expect(ctx.options.grid.markings.length).toBe(1);
});
it('should add one fill between saturday 03:00 and sunday 02:00', () => {
const markings = ctx.options.grid.markings;
expect(moment(markings[0].xaxis.from).format()).toBe(moment('2018-12-08T04:00:00+01:00').format());
expect(moment(markings[0].xaxis.to).format()).toBe(moment('2018-12-09T03:00:00+01:00').format());
});
});
plotOptionsScenario('for day of week from/to time region with daylight saving time', ctx => {
const regions = [{ fromDayOfWeek: 7, from: '20:00', toDayOfWeek: 7, to: '23:00', fill: true, colorMode: 'red' }];
const from = moment('2018-03-17T06:00:00+01:00');


@@ -87,6 +87,14 @@ export class TimeRegionManager {
continue;
}
if (timeRegion.from && !timeRegion.to) {
timeRegion.to = timeRegion.from;
}
if (!timeRegion.from && timeRegion.to) {
timeRegion.from = timeRegion.to;
}
hRange = {
from: this.parseTimeRange(timeRegion.from),
to: this.parseTimeRange(timeRegion.to),
@@ -108,21 +116,13 @@ export class TimeRegionManager {
hRange.to.dayOfWeek = Number(timeRegion.toDayOfWeek);
}
if (!hRange.from.h && hRange.to.h) {
hRange.from = hRange.to;
}
if (hRange.from.h && !hRange.to.h) {
hRange.to = hRange.from;
}
if (hRange.from.dayOfWeek && !hRange.from.h && !hRange.from.m) {
if (hRange.from.dayOfWeek && hRange.from.h === null && hRange.from.m === null) {
hRange.from.h = 0;
hRange.from.m = 0;
hRange.from.s = 0;
}
if (hRange.to.dayOfWeek && !hRange.to.h && !hRange.to.m) {
if (hRange.to.dayOfWeek && hRange.to.h === null && hRange.to.m === null) {
hRange.to.h = 23;
hRange.to.m = 59;
hRange.to.s = 59;
@@ -169,8 +169,16 @@ export class TimeRegionManager {
fromEnd.add(hRange.to.h - hRange.from.h, 'hours');
} else if (hRange.from.h + hRange.to.h < 23) {
fromEnd.add(hRange.to.h, 'hours');
while (fromEnd.hour() !== hRange.to.h) {
fromEnd.add(-1, 'hours');
}
} else {
fromEnd.add(24 - hRange.from.h, 'hours');
while (fromEnd.hour() !== hRange.to.h) {
fromEnd.add(1, 'hours');
}
}
fromEnd.set('minute', hRange.to.m);
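The while loops added above step the computed end timestamp one hour at a time until its wall-clock hour matches the region's end hour; a raw hour delta can land on the wrong hour when the region wraps midnight or crosses a daylight saving transition. A reduced sketch of that normalization (hypothetical helper, assuming the target hour exists in the stepped range):

import moment from 'moment';

// Step the timestamp in one-hour increments until its local wall-clock
// hour equals the target hour, mirroring the correction loops above.
function alignToHour(ts: moment.Moment, targetHour: number, step: 1 | -1): moment.Moment {
  while (ts.hour() !== targetHour) {
    ts.add(step, 'hours');
  }
  return ts;
}

const end = alignToHour(moment.parseZone('2018-12-09T03:00:00+01:00'), 2, -1);
console.log(end.format()); // 2018-12-09T02:00:00+01:00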


@@ -107,7 +107,10 @@ class SingleStatCtrl extends MetricsPanelCtrl {
}
onDataReceived(dataList) {
const data: any = {};
const data: any = {
scopedVars: _.extend({}, this.panel.scopedVars),
};
if (dataList.length > 0 && dataList[0].type === 'table') {
this.dataType = 'table';
const tableData = dataList.map(this.tableHandler.bind(this));
@@ -117,6 +120,7 @@ class SingleStatCtrl extends MetricsPanelCtrl {
this.series = dataList.map(this.seriesHandler.bind(this));
this.setValues(data);
}
this.data = data;
this.render();
}
@@ -320,7 +324,6 @@ class SingleStatCtrl extends MetricsPanelCtrl {
}
// Add $__name variable for using in prefix or postfix
data.scopedVars = _.extend({}, this.panel.scopedVars);
data.scopedVars['__name'] = { value: this.series[0].label };
}
this.setValueMapping(data);


@@ -199,7 +199,6 @@ small,
mark,
.mark {
padding: 0.2em;
background: $alert-warning-bg;
}


@@ -19,16 +19,23 @@ div.flot-text {
.panel {
height: 100%;
}
&--solo {
position: fixed;
bottom: 0;
right: 0;
margin: 0;
.panel-container {
border: none;
z-index: $zindex-sidemenu + 1;
}
.panel-solo {
position: fixed;
bottom: 0;
right: 0;
margin: 0;
left: 0;
top: 0;
.panel-container {
border: none;
}
.panel-menu-toggle,
.panel-menu {
display: none;
}
}


@@ -19,6 +19,12 @@
}
}
@for $i from 1 through 30 {
.min-width-#{$i} {
min-width: ($spacer * $i) - $gf-form-margin !important;
}
}
@for $i from 1 through 30 {
.offset-width-#{$i} {
margin-left: ($spacer * $i) !important;


@@ -35,6 +35,8 @@ go run build.go -goarch arm64 -cc ${CCARM64} ${OPT} build
go run build.go -goos darwin -cc ${CCOSX64} ${OPT} build
go run build.go -goos windows -cc ${CCWIN64} ${OPT} build
# Do not remove CC from the linux build, it's there for compatibility with CentOS 6
CC=${CCX64} go run build.go ${OPT} build
yarn install --pure-lockfile --no-progress
@@ -57,7 +59,7 @@ go run build.go ${OPT} build-frontend
source /etc/profile.d/rvm.sh
echo "Packaging"
go run build.go -goos linux -pkg-arch amd64 ${OPT} package-only latest
go run build.go -goos linux -pkg-arch amd64 ${OPT} package-only
#removing amd64 phantomjs bin for armv7/arm64 packages
rm tools/phantomjs/phantomjs
go run build.go -goos linux -pkg-arch armv7 ${OPT} package-only
@@ -78,3 +80,4 @@ else
fi
go run build.go -goos windows -pkg-arch amd64 ${OPT} package-only
go run build.go latest


@@ -8,6 +8,8 @@ set -e
EXTRA_OPTS="$@"
CCARMV7=arm-linux-gnueabihf-gcc
CCARM64=aarch64-linux-gnu-gcc
CCX64=/tmp/x86_64-centos6-linux-gnu/bin/x86_64-centos6-linux-gnu-gcc
GOPATH=/go
@@ -26,6 +28,9 @@ fi
echo "Build arguments: $OPT"
go run build.go -goarch armv7 -cc ${CCARMV7} ${OPT} build
go run build.go -goarch arm64 -cc ${CCARM64} ${OPT} build
CC=${CCX64} go run build.go ${OPT} build
yarn install --pure-lockfile --no-progress
@@ -43,4 +48,8 @@ go run build.go ${OPT} build-frontend
source /etc/profile.d/rvm.sh
echo "Packaging"
go run build.go -goos linux -pkg-arch amd64 ${OPT} package-only latest
go run build.go -goos linux -pkg-arch amd64 ${OPT} package-only
go run build.go -goos linux -pkg-arch armv7 ${OPT} package-only
go run build.go -goos linux -pkg-arch arm64 ${OPT} package-only
go run build.go latest


@@ -1,5 +1,25 @@
FROM circleci/golang:1.11
RUN git clone https://github.com/aptly-dev/aptly $GOPATH/src/github.com/aptly-dev/aptly && \
cd $GOPATH/src/github.com/aptly-dev/aptly && \
# pin aptly to a specific commit after 1.3.0 that contains gpg2 support
git reset --hard a64807efdaf5e380bfa878c71bc88eae10d62be1 && \
make install
FROM circleci/python:2.7-stretch
RUN sudo pip install awscli && \
curl https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-222.0.0-linux-x86_64.tar.gz | \
sudo tar xvzf - -C /opt
ENV PATH=$PATH:/opt/google-cloud-sdk/bin
USER root
RUN pip install awscli && \
curl https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-222.0.0-linux-x86_64.tar.gz | \
tar xvzf - -C /opt && \
apt update && \
apt install -y createrepo expect && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/*
COPY --from=0 /go/bin/aptly /usr/local/bin/aptly
USER circleci


@@ -1,6 +1,6 @@
#!/bin/bash
_version="1.0.0"
_version="1.1.0"
_tag="grafana/grafana-ci-deploy:${_version}"
docker build -t $_tag .


@@ -0,0 +1,7 @@
#!/usr/bin/env bash
set -e
git clone git@github.com:torkelo/private.git ~/private-repo
gpg --batch --allow-secret-key-import --import ~/private-repo/signing/private.key
pkill gpg-agent


@@ -1,6 +1,15 @@
#!/bin/bash
cd ..
git clone -b master --single-branch git@github.com:grafana/grafana-enterprise.git --depth 1
if [ -z "$CIRCLE_TAG" ]; then
_target="master"
else
_target="$CIRCLE_TAG"
fi
git clone -b "$_target" --single-branch git@github.com:grafana/grafana-enterprise.git --depth 1
cd grafana-enterprise
./build.sh


@@ -4,10 +4,10 @@
EXTRA_OPTS="$@"
# Right now we hack this into the publish script.
# Eventually we might want to keep a list of all previous releases somewhere.
_releaseNoteUrl="https://community.grafana.com/t/release-notes-v5-3-x/10244"
_whatsNewUrl="http://docs.grafana.org/guides/whats-new-in-v5-3/"
_releaseNoteUrl="https://community.grafana.com/t/release-notes-v5-4-x/12215"
_whatsNewUrl="http://docs.grafana.org/guides/whats-new-in-v5-4/"
./scripts/build/release_publisher/release_publisher \
--wn ${_whatsNewUrl} \


@@ -105,6 +105,6 @@ func TestFileWalker(t *testing.T) {
incorrectPackageName := "grafana_5.2.0-474pre1_armfoo.deb"
_, err := mapPackage(incorrectPackageName, incorrectPackageName, []byte{})
if err == nil {
t.Errorf("Testing (%v), expected to fail due to an unrecognized arch, but signalled no error.", incorrectPackageName)
t.Errorf("Testing (%v), expected to fail due to an unrecognized arch, but signaled no error.", incorrectPackageName)
}
}


@@ -41,12 +41,12 @@ func main() {
var builder releaseBuilder
var product string
archiveProviderRoot := "https://s3-us-west-2.amazonaws.com"
archiveProviderRoot := "https://dl.grafana.com"
buildArtifacts := completeBuildArtifactConfigurations
if enterprise {
product = "grafana-enterprise"
baseUrl = createBaseUrl(archiveProviderRoot, "grafana-enterprise-releases", product, nightly)
baseUrl = createBaseUrl(archiveProviderRoot, "enterprise", product, nightly)
var err error
buildArtifacts, err = filterBuildArtifacts([]artifactFilter{
{os: "deb", arch: "amd64"},
@@ -61,7 +61,7 @@ func main() {
} else {
product = "grafana"
baseUrl = createBaseUrl(archiveProviderRoot, "grafana-releases", product, nightly)
baseUrl = createBaseUrl(archiveProviderRoot, "oss", product, nightly)
}
if fromLocal {


@@ -0,0 +1,27 @@
{
"rootDir": "/deb-repo/db",
"downloadConcurrency": 4,
"downloadSpeedLimit": 0,
"architectures": [],
"dependencyFollowSuggests": false,
"dependencyFollowRecommends": false,
"dependencyFollowAllVariants": false,
"dependencyFollowSource": false,
"dependencyVerboseResolve": false,
"gpgDisableSign": false,
"gpgDisableVerify": false,
"gpgProvider": "gpg2",
"downloadSourcePackages": false,
"skipLegacyPool": true,
"ppaDistributorID": "ubuntu",
"ppaCodename": "",
"skipContentsPublishing": false,
"FileSystemPublishEndpoints": {
"repo": {
"rootDir": "/deb-repo/repo",
"linkMethod": "copy"
}
},
"S3PublishEndpoints": {},
"SwiftPublishEndpoints": {}
}


@@ -0,0 +1,7 @@
#!/usr/bin/env expect
set password [lindex $argv 0]
spawn gpg --detach-sign --armor /rpm-repo/repodata/repomd.xml
expect "Enter passphrase: "
send -- "$password\r"
expect eof


@@ -0,0 +1,7 @@
#!/usr/bin/env expect
set password [lindex $argv 0]
spawn gpg --detach-sign --armor /tmp/sign-this
expect "Enter passphrase: "
send -- "$password\r"
expect eof


@@ -0,0 +1,58 @@
#!/usr/bin/env bash
RELEASE_TYPE="${1:-}"
GPG_PASS="${2:-}"
RELEASE_TAG="${3:-}"
REPO="grafana"
if [ -z "$RELEASE_TYPE" -o -z "$GPG_PASS" ]; then
echo "Both RELEASE_TYPE (arg 1) and GPG_PASS (arg 2) has to be set"
exit 1
fi
if [[ "$RELEASE_TYPE" != "oss" && "$RELEASE_TYPE" != "enterprise" ]]; then
echo "RELEASE_TYPE (arg 1) must be either oss or enterprise."
exit 1
fi
if echo "$RELEASE_TAG" | grep -q "beta"; then
REPO="beta"
fi
set -e
# Setup environment
cp scripts/build/update_repo/aptly.conf /etc/aptly.conf
mkdir -p /deb-repo/db \
/deb-repo/repo \
/deb-repo/tmp
# Download the database
gsutil -m rsync -r "gs://grafana-aptly-db/$RELEASE_TYPE" /deb-repo/db
# Add the new release to the repo
aptly publish drop grafana filesystem:repo:grafana || true
aptly publish drop beta filesystem:repo:grafana || true
cp ./dist/*.deb /deb-repo/tmp
rm /deb-repo/tmp/grafana_latest*.deb || true
aptly repo add "$REPO" ./dist
# Setup signing and sign the repo
echo "allow-loopback-pinentry" > ~/.gnupg/gpg-agent.conf
echo "pinentry-mode loopback" > ~/.gnupg/gpg.conf
touch /tmp/sign-this
./scripts/build/update_repo/unlock-gpg-key.sh "$GPG_PASS"
rm /tmp/sign-this /tmp/sign-this.asc
aptly publish repo grafana filesystem:repo:grafana
aptly publish repo beta filesystem:repo:grafana
# Update the repo and db on gcp
gsutil -m rsync -r -d /deb-repo/db "gs://grafana-aptly-db/$RELEASE_TYPE"
gsutil -m rsync -r -d /deb-repo/repo/grafana "gs://grafana-repo/$RELEASE_TYPE/deb"
# usage:
#
# deb https://packages.grafana.com/oss/deb stable main


@@ -0,0 +1,59 @@
#!/usr/bin/env bash
RELEASE_TYPE="${1:-}"
GPG_PASS="${2:-}"
RELEASE_TAG="${3:-}"
REPO="rpm"
if [ -z "$RELEASE_TYPE" -o -z "$GPG_PASS" ]; then
echo "Both RELEASE_TYPE (arg 1) and GPG_PASS (arg 2) has to be set"
exit 1
fi
if [[ "$RELEASE_TYPE" != "oss" && "$RELEASE_TYPE" != "enterprise" ]]; then
echo "RELEASE_TYPE (arg 1) must be either oss or enterprise."
exit 1
fi
if echo "$RELEASE_TAG" | grep -q "beta"; then
REPO="rpm-beta"
fi
set -e
# Setup environment
BUCKET="gs://grafana-repo/$RELEASE_TYPE/$REPO"
mkdir -p /rpm-repo
# Download the database
gsutil -m rsync -r "$BUCKET" /rpm-repo
# Add the new release to the repo
cp ./dist/*.rpm /rpm-repo
rm /rpm-repo/grafana-latest-1*.rpm || true
cd /rpm-repo
createrepo .
# Setup signing and sign the repo
echo "allow-loopback-pinentry" > ~/.gnupg/gpg-agent.conf
echo "pinentry-mode loopback" > ~/.gnupg/gpg.conf
rm /rpm-repo/repodata/repomd.xml.asc || true
pkill gpg-agent || true
./scripts/build/update_repo/sign-rpm-repo.sh "$GPG_PASS"
# Update the repo and db on gcp
gsutil -m rsync -r -d /rpm-repo "$BUCKET"
# usage:
# [grafana]
# name=grafana
# baseurl=https://packages.grafana.com/oss/rpm
# repo_gpgcheck=1
# enabled=1
# gpgcheck=1
# gpgkey=https://packages.grafana.com/gpg.key
# sslverify=1
# sslcacert=/etc/pki/tls/certs/ca-bundle.crt