Mirror of https://github.com/grafana/grafana.git (synced 2026-01-07 12:32:58 +08:00)

Compare commits (618 commits)
.flooignore (12 changes, file deleted)
@@ -1,12 +0,0 @@
#*
*.o
*.pyc
*.pyo
*~
extern/
node_modules/
tmp/
data/
vendor/
public_gen/
dist/
.gitignore (vendored, 5 changes)
@@ -9,13 +9,12 @@ awsconfig
/public/vendor/npm
/tmp
vendor/phantomjs/phantomjs
vendor/phantomjs/phantomjs.exe

docs/AWS_S3_BUCKET
docs/GIT_BRANCH
docs/VERSION
docs/GITCOMMIT
docs/changed-files
docs/changed-files

# locally required config files
public/css/*.min.css
@@ -40,3 +39,5 @@ profile.cov
/pkg/cmd/grafana-cli/grafana-cli
/pkg/cmd/grafana-server/grafana-server
/examples/*/dist
/packaging/**/*.rpm
/packaging/**/*.deb
CHANGELOG.md (123 changes)
@@ -1,4 +1,123 @@
# 4.3.0-stable (unreleased)
# 5.0.0 (unreleased)

### WIP (in develop branch currently as it's unstable or unfinished)
- Dashboard folders
- User groups
- Dashboard permissions (on folder & dashboard level), permissions can be assigned to groups or individual users
- UX changes to nav & side menu
- New dashboard grid layout system

# 4.5.0 (2017-09-14)

## Fixes & Enhancements since beta1

* **Security**: Security fix for api vulnerability (in multiple org setups).
* **Shortcuts**: Adds shortcut for creating new dashboard [#8876](https://github.com/grafana/grafana/pull/8876) thx [@mtanda](https://github.com/mtanda)
* **Graph**: Right Y-Axis label position fixed [#9172](https://github.com/grafana/grafana/pull/9172)
* **General**: Improve rounding of time intervals [#9197](https://github.com/grafana/grafana/pull/9197), thx [@alin-amana](https://github.com/alin-amana)

# 4.5.0-beta1 (2017-09-05)

## New Features

* **Table panel**: Render cell values as links that can have a URL template that uses variables from the current table row. [#3754](https://github.com/grafana/grafana/issues/3754)
* **Elasticsearch**: Add ad hoc filters directly by clicking values in table panel [#8052](https://github.com/grafana/grafana/issues/8052).
* **MySQL**: New rich query editor with syntax highlighting
* **Prometheus**: New rich query editor with syntax highlighting, metric & range auto complete and integrated function docs. [#5117](https://github.com/grafana/grafana/issues/5117)

## Enhancements

* **GitHub OAuth**: Support for GitHub organizations with 100+ teams. [#8846](https://github.com/grafana/grafana/issues/8846), thx [@skwashd](https://github.com/skwashd)
* **Graphite**: Calls to the Graphite api /metrics/find now include the panel or dashboard time range (from & until) in most cases, [#8055](https://github.com/grafana/grafana/issues/8055)
* **Graphite**: Added new graphite 1.0 functions, available if you set version to 1.0.x in data source settings. New functions: mapSeries, reduceSeries, isNonNull, groupByNodes, offsetToZero, grep, weightedAverage, removeEmptySeries, aggregateLine, averageOutsidePercentile, delay, exponentialMovingAverage, fallbackSeries, integralByInterval, interpolate, invert, linearRegression, movingMin, movingMax, movingSum, multiplySeriesWithWildcards, pow, powSeries, removeBetweenPercentile, squareRoot, timeSlice, closes [#8261](https://github.com/grafana/grafana/issues/8261)
- **Elasticsearch**: Ad-hoc filters now use query phrase match filters instead of term filters, so they work on non keyword/raw fields [#9095](https://github.com/grafana/grafana/issues/9095).

### Breaking change

* **InfluxDB/Elasticsearch**: The panel & data source option named "Group by time interval" is now named "Min time interval" and now always defines a lower limit for the auto group-by time, without having to use the `>` prefix (that prefix still works). This should in theory have close to zero impact on existing dashboards. It does mean that if you used this setting to define a hard group-by interval of, say, "1d", zooming out to a wide enough time range can now produce an interval larger than "1d", since the setting is always treated as a lower limit (see the sketch below).
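To make the lower-limit semantics concrete, here is a minimal, hypothetical Go sketch (not taken from the Grafana codebase): the effective group-by interval is simply the larger of the automatically computed interval and the configured "Min time interval".

```go
package main

import (
	"fmt"
	"time"
)

// effectiveInterval illustrates the semantics described above: the configured
// minimum acts as a floor for the auto-computed group-by interval, never as a
// fixed value. Hypothetical helper, not Grafana's actual implementation.
func effectiveInterval(auto, min time.Duration) time.Duration {
	if auto < min {
		return min
	}
	return auto
}

func main() {
	day := 24 * time.Hour
	// Zoomed in: auto interval 5m, minimum "1d" -> 1d is used.
	fmt.Println(effectiveInterval(5*time.Minute, day)) // 24h0m0s
	// Zoomed far out: auto interval 2d, minimum "1d" -> 2d is used.
	fmt.Println(effectiveInterval(2*day, day)) // 48h0m0s
}
```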
## Changes

* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
* **InfluxDB**: Added parentheses around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)

## Bug Fixes

* **Modals**: Maintain scroll position after opening/leaving modal [#8800](https://github.com/grafana/grafana/issues/8800)
* **Templating**: You cannot select data source variables as data source for other template variables [#7510](https://github.com/grafana/grafana/issues/7510)
* **MySQL/Postgres**: Fix for the max_idle_conn option default, which was wrongly set to zero. Zero does not mean unlimited here; it means zero idle connections, which in practice largely disables connection pooling (see the sketch after this list). Fixes [#8513](https://github.com/grafana/grafana/issues/8513)
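For background on that max_idle_conn fix, the sketch below is a minimal, hypothetical Go example (not taken from the Grafana codebase; the DSN and driver choice are placeholders) showing how Go's standard database/sql pool settings behave: with the maximum number of idle connections set to 0, connections are closed as soon as they are returned to the pool, so pooling is effectively disabled, while 0 for the maximum number of open connections means no limit.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // placeholder driver choice
)

func main() {
	// Placeholder DSN; any MySQL-compatible DSN would do.
	db, err := sql.Open("mysql", "user:secret@tcp(localhost:3306)/grafana")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// 0 idle connections: every connection is closed when returned to the
	// pool, which is the behavior the bug above accidentally introduced.
	db.SetMaxIdleConns(0)

	// A small positive value (database/sql's own default is 2) keeps a few
	// connections warm; this is what the corrected max_idle_conn = 2
	// default in the config files further down corresponds to.
	db.SetMaxIdleConns(2)

	// 0 here means "no limit on open connections", matching the empty
	// max_open_conn default.
	db.SetMaxOpenConns(0)
}
```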
# 4.4.3 (2017-08-07)

## Bug Fixes

* **Search**: Fix for issue that caused search view to hide when you clicked starred or tags filters, fixes [#8981](https://github.com/grafana/grafana/issues/8981)
* **Modals**: ESC key now closes modal again, fixes [#8988](https://github.com/grafana/grafana/issues/8988), thx [@j-white](https://github.com/j-white)

# 4.4.2 (2017-08-01)

## Bug Fixes

* **GrafanaDB(mysql)**: Fix for dashboard_version.data column type, now changed to MEDIUMTEXT, fixes [#8813](https://github.com/grafana/grafana/issues/8813)
* **Dashboard(settings)**: Closing setting views using ESC key did not update url correctly, fixes [#8869](https://github.com/grafana/grafana/issues/8869)
* **InfluxDB**: Wrong username/password parameter name when using direct access, fixes [#8789](https://github.com/grafana/grafana/issues/8789)
* **Forms(TextArea)**: Bug fix for no scroll in text areas [#8797](https://github.com/grafana/grafana/issues/8797)
* **Png Render API**: Bug fix for timeout url parameter. It now works as it should. Default value was also increased from 30 to 60 seconds [#8710](https://github.com/grafana/grafana/issues/8710)
* **Search**: Fix for not being able to close search by clicking on right side of search result container, [#8848](https://github.com/grafana/grafana/issues/8848)
* **Cloudwatch**: Fix for using variables in templating metrics() query, [#8965](https://github.com/grafana/grafana/issues/8965)

## Changes

* **Settings(defaults)**: allow_sign_up default changed from true to false [#8743](https://github.com/grafana/grafana/issues/8743)
* **Settings(defaults)**: allow_org_create default changed from true to false

# 4.4.1 (2017-07-05)

## Bug Fixes

* **Migrations**: migration fails where dashboard.created_by is null [#8783](https://github.com/grafana/grafana/issues/8783)

# 4.4.0 (2017-07-04)

## New Features

**Dashboard History**: View dashboard version history, compare any two versions (summary & json diffs), restore to old version. This big feature
was contributed by **Walmart Labs**. Big thanks to them for this massive contribution!
Initial feature request: [#4638](https://github.com/grafana/grafana/issues/4638)
Pull Request: [#8472](https://github.com/grafana/grafana/pull/8472)

## Enhancements

* **Elasticsearch**: Added filter aggregation label [#8420](https://github.com/grafana/grafana/pull/8420), thx [@tianzk](github.com/tianzk)
* **Sensu**: Added option for source and handler [#8405](https://github.com/grafana/grafana/pull/8405), thx [@joemiller](github.com/joemiller)
* **CSV**: Configurable csv export datetime format [#8058](https://github.com/grafana/grafana/issues/8058), thx [@cederigo](github.com/cederigo)
* **Table Panel**: Column style that preserves formatting/indentation (like pre tag) [#6617](https://github.com/grafana/grafana/issues/6617)
* **DingDing**: Add DingDing Alert Notifier [#8473](https://github.com/grafana/grafana/pull/8473) thx [@jiamliang](https://github.com/jiamliang)

## Minor Enhancements

* **Elasticsearch**: Add option for result set size in raw_document [#3426](https://github.com/grafana/grafana/issues/3426) [#8527](https://github.com/grafana/grafana/pull/8527), thx [@mk-dhia](github.com/mk-dhia)

## Bug Fixes

* **Graph**: Bug fix for negative values in histogram mode [#8628](https://github.com/grafana/grafana/issues/8628)

# 4.3.2 (2017-05-31)

## Bug fixes

* **InfluxDB**: Fixed issue with query editor not showing ALIAS BY input field when in text editor mode [#8459](https://github.com/grafana/grafana/issues/8459)
* **Graph Log Scale**: Fixed issue with log scale going below x-axis [#8244](https://github.com/grafana/grafana/issues/8244)
* **Playlist**: Fixed dashboard play order issue [#7688](https://github.com/grafana/grafana/issues/7688)
* **Elasticsearch**: Fixed table query issue with ES 2.x [#8467](https://github.com/grafana/grafana/issues/8467), thx [@goldeelox](https://github.com/goldeelox)

## Changes

* **Lazy Loading Of Panels**: Panels are no longer loaded as they are scrolled into view; this was reverted due to a Chrome bug and might be reintroduced when Chrome fixes its JS blocking behavior on scroll. [#8500](https://github.com/grafana/grafana/issues/8500)

# 4.3.1 (2017-05-23)

## Bug fixes

* **S3 image upload**: Fixed image url issue for us-east-1 (us standard) region. If you were missing slack images for alert notifications this should fix it. [#8444](https://github.com/grafana/grafana/issues/8444)

# 4.3.0-stable (2017-05-23)

## Bug fixes

@@ -30,7 +149,7 @@
* **Heatmap**: Heatmap Panel [#7934](https://github.com/grafana/grafana/pull/7934)
* **Elasticsearch**: histogram aggregation [#3164](https://github.com/grafana/grafana/issues/3164)

## Minor Enchancements
## Minor Enhancements

* **InfluxDB**: Small fix for the "glow" when focusing the field for LIMIT and SLIMIT [#7799](https://github.com/grafana/grafana/pull/7799) thx [@thuck](https://github.com/thuck)
* **Prometheus**: Make Prometheus query field a textarea [#7663](https://github.com/grafana/grafana/issues/7663), thx [@hagen1778](https://github.com/hagen1778)
CODE_OF_CONDUCT.md (new file, 46 lines)
@@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at contact@grafana.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
README.md (92 changes)
@@ -1,4 +1,4 @@
[Grafana](https://grafana.com) [](https://circleci.com/gh/grafana/grafana)
[Grafana](https://grafana.com) [](https://circleci.com/gh/grafana/grafana) [](https://goreportcard.com/report/github.com/grafana/grafana)
================
[Website](https://grafana.com) |
[Twitter](https://twitter.com/grafana) |
@@ -9,64 +9,8 @@ Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.

- [Install instructions](http://docs.grafana.org/installation/)
- [What's New in Grafana 2.0](http://docs.grafana.org/guides/whats-new-in-v2/)
- [What's New in Grafana 2.1](http://docs.grafana.org/guides/whats-new-in-v2-1/)
- [What's New in Grafana 2.5](http://docs.grafana.org/guides/whats-new-in-v2-5/)
- [What's New in Grafana 3.0](http://docs.grafana.org/guides/whats-new-in-v3/)
- [What's New in Grafana 4.0](http://docs.grafana.org/guides/whats-new-in-v4/)
- [What's New in Grafana 4.1](http://docs.grafana.org/guides/whats-new-in-v4-1/)
- [What's New in Grafana 4.2](http://docs.grafana.org/guides/whats-new-in-v4-2/)
- [What's New in Grafana 4.3](http://docs.grafana.org/guides/whats-new-in-v4-3/)

## Features

### Graphing
- Fast rendering, even over large timespans
- Click and drag to zoom
- Multiple Y-axes, logarithmic scales
- Bars, Lines, Points
- Smart Y-axis formatting
- Series toggles & color selector
- Legend values, and formatting options
- Grid thresholds, axis labels
- [Annotations](http://docs.grafana.org/reference/annotations/)
- Any panel can be rendered to PNG (server side using phantomjs)

### Dashboards
- Create, edit, save & search dashboards
- Change column spans and row heights
- Drag and drop panels to rearrange
- [Templating](http://docs.grafana.org/reference/templating/)
- [Scripted dashboards](http://docs.grafana.org/reference/scripting/)
- [Dashboard playlists](http://docs.grafana.org/reference/playlist/)
- [Time range controls](http://docs.grafana.org/reference/timerange/)
- [Share snapshots publicly](http://docs.grafana.org/v2.0/reference/sharing/)

### InfluxDB
- Use InfluxDB as a metric data source, annotation source
- Query editor with field and tag typeahead, easy group by and function selection

### Graphite
- Graphite target expression parser
- Feature rich query composer
- Quickly add and edit functions & parameters
- Templated queries
- [See it in action](http://docs.grafana.org/datasources/graphite/)

### Elasticsearch, Prometheus & OpenTSDB
- Feature rich query editor UI

### Alerting
- Define alert rules using graphs & query conditions
- Schedule & evaluate alert rules, send notifications to Slack, Hipchat, Email, PagerDuty, etc.

## Requirements
There are no dependencies except an external time series data store. For dashboards and user accounts Grafana can use an embedded
database (sqlite3) or you can use an external SQL database like MySQL or Postgres.

## Installation
Head to [grafana.org](http://docs.grafana.org/installation/) and [download](https://grafana.com/get)
Head to [docs.grafana.org](http://docs.grafana.org/installation/) and [download](https://grafana.com/get)
the latest release.

If you have any problems please read the [troubleshooting guide](http://docs.grafana.org/installation/troubleshooting/).
@@ -83,27 +27,10 @@ the latest master builds [here](https://grafana.com/grafana/download)
- Go 1.8.1
- NodeJS LTS

### Get Code

```bash
go get github.com/grafana/grafana
```

Since imports of dependencies use the absolute path `github.com/grafana/grafana` within the `$GOPATH`,
you will need to put your version of the code in `$GOPATH/src/github.com/grafana/grafana` to be able
to develop and build grafana on a cloned repository. To do so, you can clone your forked repository
directly to `$GOPATH/src/github.com/grafana`, or you can create a symbolic link from your version
of the code to `$GOPATH/src/github.com/grafana/grafana`. The latter option makes it easy to change
which grafana repository you build.
```bash
go get github.com/*your_account*/grafana
mkdir $GOPATH/src/github.com/grafana
ln -s $GOPATH/src/github.com/*your_account*/grafana $GOPATH/src/github.com/grafana/grafana
```

### Building the backend
```bash
cd $GOPATH/src/github.com/grafana/grafana
go get github.com/grafana/grafana
cd ~/go/src/github.com/grafana/grafana
go run build.go setup
go run build.go build
```
@@ -122,8 +49,7 @@ npm run build
To build the frontend assets only on changes:

```bash
sudo npm install -g grunt-cli # needed only once, to install the grunt command line interface
grunt watch
npm run dev
```

### Recompile backend on source change
@@ -133,11 +59,6 @@ go get github.com/Unknwon/bra
bra run
```

### Running
```bash
./bin/grafana-server
```

Open grafana in your browser (default: `http://localhost:3000`) and login with the admin user (default: `user/pass = admin/admin`).

### Dev config
@@ -148,9 +69,6 @@ You only need to add the options you want to override. Config files are applied
1. grafana.ini
1. custom.ini

## Create a pull request
Before or after you create a pull request, sign the [contributor license agreement](http://docs.grafana.org/project/cla/).

## Contribute
If you have any idea for an improvement or found a bug do not hesitate to open an issue.
And if you have time clone this repo and submit a pull request and help me make Grafana
ROADMAP.md (22 changes)
@@ -1,31 +1,29 @@
# Roadmap (2017-04-23)
# Roadmap (2017-08-29)

This roadmap is a tentative plan for the core development team. Things change constantly as PRs come in and priorities change.
But it will give you an idea of our current vision and plan.

### Short term (1-4 months)

- New Heatmap Panel (Implemented and available in master)
- Support for MySQL & Postgres as data sources (Work started and an alpha version for MySQL is available in master)
- User Groups & Dashboard folders with ACLs (work started, not yet completed, https://github.com/grafana/grafana/issues/1611#issuecomment-287742633)
- Improve new user UX
- Improve docs
- Support for alerting for Elasticsearch (can be tested in [branch](https://github.com/grafana/grafana/tree/alerting-elasticsearch) but needs more work)
- Graph annotations (create from grafana, region annotations, better annotation viz)
- Improve alerting (clustering, silence rules)
- Release Grafana v4.5 with fixes and minor enhancements
- Release Grafana v5
  - User groups
  - Dashboard folders
  - Dashboard permissions (on folders as well), permissions on groups or users
  - New Dashboard layout engine
  - New sidemenu & nav UX
  - Elasticsearch alerting

### Long term

- Improved dashboard panel layout engine (to make it easier and enable more flexible layouts)
- Backend plugins to support more Auth options, Alerting data sources & notifications
- Universal time series transformations for any data source (meta queries)
- Reporting
- Web socket & live data streams
- Migrate to Angular2
- Migrate to Angular2 or react

### Outside contributions
We know this is being worked on right now by contributors (and we hope to merge it when it's ready).

- Dashboard revisions (be able to revert dashboard changes)
- Clustering for alert engine (load distribution)

@@ -32,6 +32,7 @@ build_script:
- grunt release
- go run build.go sha-dist
- cp dist/* .
- go test -v ./pkg/...

artifacts:
- path: grafana-*windows-*.*
build.go (12 changes)
@@ -95,7 +95,9 @@ func main() {

	case "package":
		grunt(gruntBuildArg("release")...)
		createLinuxPackages()
		if runtime.GOOS != "windows" {
			createLinuxPackages()
		}

	case "pkg-rpm":
		grunt(gruntBuildArg("release")...)
@@ -235,7 +237,7 @@ func createRpmPackages() {
		defaultFileSrc: "packaging/rpm/sysconfig/grafana-server",
		systemdFileSrc: "packaging/rpm/systemd/grafana-server.service",

		depends: []string{"/sbin/service", "fontconfig"},
		depends: []string{"/sbin/service", "fontconfig", "freetype", "urw-fonts"},
	})
}

@@ -345,7 +347,11 @@ func ChangeWorkingDir(dir string) {
}

func grunt(params ...string) {
	runPrint("./node_modules/.bin/grunt", params...)
	if runtime.GOOS == "windows" {
		runPrint(`.\node_modules\.bin\grunt`, params...)
	} else {
		runPrint("./node_modules/.bin/grunt", params...)
	}
}

func gruntBuildArg(task string) []string {
@@ -76,8 +76,10 @@ password =
# Example: mysql://user:secret@host:port/database
url =

# Max idle conn setting default is 2
max_idle_conn = 2

# Max conn setting default is 0 (means not set)
max_idle_conn =
max_open_conn =

# For "postgres", use either "disable", "require" or "verify-full"
@@ -184,10 +186,10 @@ snapshot_TTL_days = 90
#################################### Users ####################################
[users]
# disable user signup / registration
allow_sign_up = true
allow_sign_up = false

# Allow non admin users to create organizations
allow_org_create = true
allow_org_create = false

# Set to true to automatically assign new users to the default organization (id 1)
auto_assign_org = true
@@ -204,6 +206,11 @@ login_hint = email or username
# Default UI theme ("dark" or "light")
default_theme = dark

# External user management
external_manage_link_url =
external_manage_link_name =
external_manage_info =

[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth
disable_login_form = false
@@ -85,8 +85,10 @@
# For "sqlite3" only, path relative to data_path setting
;path = grafana.db

# Max idle conn setting default is 2
;max_idle_conn = 2

# Max conn setting default is 0 (means not set)
;max_idle_conn =
;max_open_conn =

@@ -191,6 +193,11 @@
# Default UI theme ("dark" or "light")
;default_theme = dark

# External user management, these options affect the organization users view
;external_manage_link_url =
;external_manage_link_name =
;external_manage_info =

[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
@@ -298,7 +305,7 @@
# Use space to separate multiple modes, e.g. "console file"
;mode = console file

# Either "trace", "debug", "info", "warn", "error", "critical", default is "info"
# Either "debug", "info", "warn", "error", "critical", default is "info"
;level = info

# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
@@ -32,6 +32,7 @@ add ./files/my_htpasswd /etc/nginx/.htpasswd
# Add system service config
add ./files/nginx.conf /etc/nginx/nginx.conf
add ./files/supervisord.conf /etc/supervisor/conf.d/supervisord.conf

# Nginx
#
# graphite
@@ -39,6 +40,7 @@ expose 80

# Carbon line receiver port
expose 2003

# Carbon cache query port
expose 7002

@@ -4,7 +4,6 @@ graphite:
- "8080:80"
- "2003:2003"
volumes:
- /var/docker/gfdev/graphite:/opt/graphite/storage/whisper
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
docker/blocks/graphite1/Dockerfile (new file, 93 lines)
@@ -0,0 +1,93 @@
FROM phusion/baseimage:0.9.22
MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>

RUN apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y --force-yes install vim \
  nginx \
  python-dev \
  python-flup \
  python-pip \
  python-ldap \
  expect \
  git \
  memcached \
  sqlite3 \
  libffi-dev \
  libcairo2 \
  libcairo2-dev \
  python-cairo \
  python-rrdtool \
  pkg-config \
  nodejs \
  && rm -rf /var/lib/apt/lists/*

# fix python dependencies (LTS Django and newer memcached/txAMQP)
RUN pip install django==1.8.18 \
  python-memcached==1.53 \
  txAMQP==0.6.2 \
  && pip install --upgrade pip

# install whisper
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper
WORKDIR /usr/local/src/whisper
RUN python ./setup.py install

# install carbon
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon
WORKDIR /usr/local/src/carbon
RUN pip install -r requirements.txt \
  && python ./setup.py install

# install graphite
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
WORKDIR /usr/local/src/graphite-web
RUN pip install -r requirements.txt \
  && python ./setup.py install
ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
WORKDIR /opt/graphite/webapp
RUN mkdir -p /var/log/graphite/ \
  && PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings

# install statsd
RUN git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
ADD conf/opt/statsd/config.js /opt/statsd/config.js

# config nginx
RUN rm /etc/nginx/sites-enabled/default
ADD conf/etc/nginx/nginx.conf /etc/nginx/nginx.conf
ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/graphite-statsd.conf

# init django admin
ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
RUN chmod +x /usr/local/bin/manage.sh \
  && /usr/local/bin/django_admin_init.exp

# logging support
RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
ADD conf/etc/logrotate.d/graphite-statsd /etc/logrotate.d/graphite-statsd

# daemons
ADD conf/etc/service/carbon/run /etc/service/carbon/run
ADD conf/etc/service/carbon-aggregator/run /etc/service/carbon-aggregator/run
ADD conf/etc/service/graphite/run /etc/service/graphite/run
ADD conf/etc/service/statsd/run /etc/service/statsd/run
ADD conf/etc/service/nginx/run /etc/service/nginx/run

# default conf setup
ADD conf /etc/graphite-statsd/conf
ADD conf/etc/my_init.d/01_conf_init.sh /etc/my_init.d/01_conf_init.sh

# cleanup
RUN apt-get clean \
  && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# defaults
EXPOSE 80 2003-2004 2023-2024 8125/udp 8126
VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
WORKDIR /
ENV HOME /root
CMD ["/sbin/my_init"]
docker/blocks/graphite1/conf/etc/logrotate.d/graphite-statsd (new file, 11 lines)
@@ -0,0 +1,11 @@
/var/log/*.log /var/log/*/*.log {
  weekly
  size 50M
  missingok
  rotate 10
  compress
  delaycompress
  notifempty
  copytruncate
  su root syslog
}
docker/blocks/graphite1/conf/etc/my_init.d/01_conf_init.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
#!/bin/bash

conf_dir=/etc/graphite-statsd/conf

# auto setup graphite with default configs if /opt/graphite is missing
# needed for the use case when a docker host volume is mounted at any of the following:
# - /opt/graphite
# - /opt/graphite/conf
# - /opt/graphite/webapp/graphite
graphite_dir_contents=$(find /opt/graphite -mindepth 1 -print -quit)
graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit)
graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit)
graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit)
if [[ -z $graphite_dir_contents ]]; then
  git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
  cd /usr/local/src/graphite-web && python ./setup.py install
fi
if [[ -z $graphite_storage_dir_contents ]]; then
  /usr/local/bin/django_admin_init.exp
fi
if [[ -z $graphite_conf_dir_contents ]]; then
  cp -R $conf_dir/opt/graphite/conf/*.conf /opt/graphite/conf/
fi
if [[ -z $graphite_webapp_dir_contents ]]; then
  cp $conf_dir/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
fi

# auto setup statsd with default config if /opt/statsd is missing
# needed for the use case when a docker host volume is mounted at any of the following:
# - /opt/statsd
statsd_dir_contents=$(find /opt/statsd -mindepth 1 -print -quit)
if [[ -z $statsd_dir_contents ]]; then
  git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
  cp $conf_dir/opt/statsd/config.js /opt/statsd/config.js
fi
docker/blocks/graphite1/conf/etc/nginx/nginx.conf (new file, 96 lines)
@@ -0,0 +1,96 @@
user www-data;
worker_processes 4;
pid /run/nginx.pid;
daemon off;

events {
  worker_connections 768;
  # multi_accept on;
}

http {

  ##
  # Basic Settings
  ##

  sendfile on;
  tcp_nopush on;
  tcp_nodelay on;
  keepalive_timeout 65;
  types_hash_max_size 2048;
  # server_tokens off;

  # server_names_hash_bucket_size 64;
  # server_name_in_redirect off;

  include /etc/nginx/mime.types;
  default_type application/octet-stream;

  ##
  # Logging Settings
  ##

  access_log /var/log/nginx/access.log;
  error_log /var/log/nginx/error.log;

  ##
  # Gzip Settings
  ##

  gzip on;
  gzip_disable "msie6";

  # gzip_vary on;
  # gzip_proxied any;
  # gzip_comp_level 6;
  # gzip_buffers 16 8k;
  # gzip_http_version 1.1;
  # gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;

  ##
  # nginx-naxsi config
  ##
  # Uncomment it if you installed nginx-naxsi
  ##

  #include /etc/nginx/naxsi_core.rules;

  ##
  # nginx-passenger config
  ##
  # Uncomment it if you installed nginx-passenger
  ##

  #passenger_root /usr;
  #passenger_ruby /usr/bin/ruby;

  ##
  # Virtual Host Configs
  ##

  include /etc/nginx/conf.d/*.conf;
  include /etc/nginx/sites-enabled/*;
}


#mail {
#  # See sample authentication script at:
#  # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
#  # auth_http localhost/auth.php;
#  # pop3_capabilities "TOP" "USER";
#  # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
#  server {
#    listen localhost:110;
#    protocol pop3;
#    proxy on;
#  }
#
#  server {
#    listen localhost:143;
#    protocol imap;
#    proxy on;
#  }
#}
@@ -0,0 +1,31 @@
server {
  listen 80;
  root /opt/graphite/static;
  index index.html;

  location /media {
    # django admin static files
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/media/;
  }

  location /admin/auth/admin {
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
  }

  location /admin/auth/user/admin {
    alias /usr/local/lib/python2.7/dist-packages/django/contrib/admin/static/admin;
  }

  location / {
    proxy_pass http://localhost:8080;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

    add_header 'Access-Control-Allow-Origin' '*';
    add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
    add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type';
    add_header 'Access-Control-Allow-Credentials' 'true';
  }

}
docker/blocks/graphite1/conf/etc/service/carbon-aggregator/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-aggregator-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-aggregator.py start --debug 2>&1 >> /var/log/carbon-aggregator.log

docker/blocks/graphite1/conf/etc/service/carbon/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

rm -f /opt/graphite/storage/carbon-cache-a.pid
exec /usr/bin/python /opt/graphite/bin/carbon-cache.py start --debug 2>&1 >> /var/log/carbon.log

docker/blocks/graphite1/conf/etc/service/graphite/run (new executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/bash

export PYTHONPATH=/opt/graphite/webapp && exec /usr/local/bin/gunicorn wsgi --workers=4 --bind=127.0.0.1:8080 --log-file=/var/log/gunicorn.log --preload --pythonpath=/opt/graphite/webapp/graphite

docker/blocks/graphite1/conf/etc/service/nginx/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

mkdir -p /var/log/nginx
exec /usr/sbin/nginx -c /etc/nginx/nginx.conf

docker/blocks/graphite1/conf/etc/service/statsd/run (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash

exec /usr/bin/nodejs /opt/statsd/stats.js /opt/statsd/config.js >> /var/log/statsd.log 2>&1
@@ -0,0 +1,35 @@
# The form of each line in this file should be as follows:
#
# output_template (frequency) = method input_pattern
#
# This will capture any received metrics that match 'input_pattern'
# for calculating an aggregate metric. The calculation will occur
# every 'frequency' seconds and the 'method' can specify 'sum' or
# 'avg'. The name of the aggregate metric will be derived from
# 'output_template' filling in any captured fields from 'input_pattern'.
#
# For example, if your metric naming scheme is:
#
#   <env>.applications.<app>.<server>.<metric>
#
# You could configure some aggregations like so:
#
#   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
#   <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
#
# As an example, if the following metrics are received:
#
#   prod.applications.apache.www01.requests
#   prod.applications.apache.www01.requests
#
# They would all go into the same aggregation buffer and after 60 seconds the
# aggregate metric 'prod.applications.apache.all.requests' would be calculated
# by summing their values.
#
# Template components such as <env> will match everything up to the next dot.
# To match multiple metric components including the dots, use <<metric>> in the
# input template:
#
#   <env>.applications.<app>.all.<app_metric> (60) = sum <env>.applications.<app>.*.<<app_metric>>
#
# Note that any time this file is modified, it will be re-read automatically.
@@ -0,0 +1,5 @@
# This file takes a single regular expression per line
# If USE_WHITELIST is set to True in carbon.conf, any metrics received which
# match one of these expressions will be dropped
# This file is reloaded automatically when changes are made
^some\.noisy\.metric\.prefix\..*
@@ -0,0 +1,75 @@
# This is a configuration file with AMQP enabled

[cache]
LOCAL_DATA_DIR =

# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =

# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries gets more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf

# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000

# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = inf

LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002

# Enable AMQP if you want to receive metrics using your amqp broker
ENABLE_AMQP = True

# Verbose means a line will be logged for every metric received
# useful for testing
AMQP_VERBOSE = True

# your credentials for the amqp server
# AMQP_USER = guest
# AMQP_PASSWORD = guest

# the network settings for the amqp server
# AMQP_HOST = localhost
# AMQP_PORT = 5672

# if you want to include the metric name as part of the message body
# instead of as the routing key, set this to True
# AMQP_METRIC_NAME_IN_BODY = False

# NOTE: you cannot run both a cache and a relay on the same server
# with the default configuration; you have to specify distinct
# interfaces and ports for the listeners.

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003

PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004

CACHE_SERVERS = server1, server2, server3
MAX_QUEUE_SIZE = 10000
359
docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf
Normal file
359
docker/blocks/graphite1/conf/opt/graphite/conf/carbon.conf
Normal file
@@ -0,0 +1,359 @@
|
||||
[cache]
|
||||
# Configure carbon directories.
|
||||
#
|
||||
# OS environment variables can be used to tell carbon where graphite is
|
||||
# installed, where to read configuration from and where to write data.
|
||||
#
|
||||
# GRAPHITE_ROOT - Root directory of the graphite installation.
|
||||
# Defaults to ../
|
||||
# GRAPHITE_CONF_DIR - Configuration directory (where this file lives).
|
||||
# Defaults to $GRAPHITE_ROOT/conf/
|
||||
# GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
|
||||
# Defaults to $GRAPHITE_ROOT/storage/
|
||||
#
|
||||
# To change other directory paths, add settings to this file. The following
|
||||
# configuration variables are available with these default values:
|
||||
#
|
||||
# STORAGE_DIR = $GRAPHITE_STORAGE_DIR
|
||||
# LOCAL_DATA_DIR = STORAGE_DIR/whisper/
|
||||
# WHITELISTS_DIR = STORAGE_DIR/lists/
|
||||
# CONF_DIR = STORAGE_DIR/conf/
|
||||
# LOG_DIR = STORAGE_DIR/log/
|
||||
# PID_DIR = STORAGE_DIR/
|
||||
#
|
||||
# For FHS style directory structures, use:
|
||||
#
|
||||
# STORAGE_DIR = /var/lib/carbon/
|
||||
# CONF_DIR = /etc/carbon/
|
||||
# LOG_DIR = /var/log/carbon/
|
||||
# PID_DIR = /var/run/
|
||||
#
|
||||
#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
|
||||
|
||||
# Enable daily log rotation. If disabled, a kill -HUP can be used after a manual rotate
|
||||
ENABLE_LOGROTATION = True
|
||||
|
||||
# Specify the user to drop privileges to
|
||||
# If this is blank carbon runs as the user that invokes it
|
||||
# This user must have write access to the local data directory
|
||||
USER =
|
||||
#
|
||||
# NOTE: The above settings must be set under [relay] and [aggregator]
|
||||
# to take effect for those daemons as well
|
||||
|
||||
# Limit the size of the cache to avoid swapping or becoming CPU bound.
|
||||
# Sorting and serving cache queries gets more expensive as the cache grows.
|
||||
# Use the value "inf" (infinity) for an unlimited cache size.
|
||||
MAX_CACHE_SIZE = inf
|
||||
|
||||
# Limits the number of whisper update_many() calls per second, which effectively
|
||||
# means the number of write requests sent to the disk. This is intended to
|
||||
# prevent over-utilizing the disk and thus starving the rest of the system.
|
||||
# When the rate of required updates exceeds this, then carbon's caching will
|
||||
# take effect and increase the overall throughput accordingly.
|
||||
MAX_UPDATES_PER_SECOND = 500
|
||||
|
||||
# If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a
|
||||
# stop/shutdown is initiated. This helps when MAX_UPDATES_PER_SECOND is
|
||||
# relatively low and carbon has cached a lot of updates; it enables the carbon
|
||||
# daemon to shutdown more quickly.
|
||||
# MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000
|
||||
|
||||
# Softly limits the number of whisper files that get created each minute.
|
||||
# Setting this value low (like at 50) is a good way to ensure your graphite
|
||||
# system will not be adversely impacted when a bunch of new metrics are
|
||||
# sent to it. The trade off is that it will take much longer for those metrics'
|
||||
# database files to all get created and thus longer until the data becomes usable.
|
||||
# Setting this value high (like "inf" for infinity) will cause graphite to create
|
||||
# the files quickly but at the risk of slowing I/O down considerably for a while.
|
||||
MAX_CREATES_PER_MINUTE = 50
|
||||
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2003
|
||||
|
||||
# Set this to True to enable the UDP listener. By default this is off
|
||||
# because it is very common to run multiple carbon daemons and managing
|
||||
# another (rarely used) port for every carbon instance is not fun.
|
||||
ENABLE_UDP_LISTENER = False
|
||||
UDP_RECEIVER_INTERFACE = 0.0.0.0
|
||||
UDP_RECEIVER_PORT = 2003
|
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2004
|
||||
|
||||
# Set to false to disable logging of successful connections
|
||||
LOG_LISTENER_CONNECTIONS = True
|
||||
|
||||
# Per security concerns outlined in Bug #817247 the pickle receiver
|
||||
# will use a more secure and slightly less efficient unpickler.
|
||||
# Set this to True to revert to the old-fashioned insecure unpickler.
|
||||
USE_INSECURE_UNPICKLER = False
|
||||
|
||||
CACHE_QUERY_INTERFACE = 0.0.0.0
|
||||
CACHE_QUERY_PORT = 7002
|
||||
|
||||
# Set this to False to drop datapoints received after the cache
|
||||
# reaches MAX_CACHE_SIZE. If this is True (the default) then sockets
|
||||
# over which metrics are received will temporarily stop accepting
|
||||
# data until the cache size falls below 95% MAX_CACHE_SIZE.
|
||||
USE_FLOW_CONTROL = True
|
||||
|
||||
# By default, carbon-cache will log every whisper update and cache hit. This can be excessive and
|
||||
# degrade performance if logging on the same volume as the whisper data is stored.
|
||||
LOG_UPDATES = False
|
||||
LOG_CACHE_HITS = False
|
||||
LOG_CACHE_QUEUE_SORTS = True
|
||||
|
||||
# The thread that writes metrics to disk can use one of the following strategies for
|
||||
# determining the order in which metrics are removed from cache and flushed to
|
||||
# disk. The default option preserves the same behavior as has been historically
|
||||
# available in version 0.9.10.
|
||||
#
|
||||
# sorted - All metrics in the cache will be counted and an ordered list of
|
||||
# them will be sorted according to the number of datapoints in the cache at the
|
||||
# moment of the list's creation. Metrics will then be flushed from the cache to
|
||||
# disk in that order.
|
||||
#
|
||||
# max - The writer thread will always pop and flush the metric from cache
|
||||
# that has the most datapoints. This will give a strong flush preference to
|
||||
# frequently updated metrics and will also reduce random file-io. Infrequently
|
||||
# updated metrics may only ever be persisted to disk at daemon shutdown if
|
||||
# there are a large number of metrics which receive very frequent updates OR if
|
||||
# disk i/o is very slow.
|
||||
#
|
||||
# naive - Metrics will be flushed from the cache to disk in an unordered
|
||||
# fashion. This strategy may be desirable in situations where the storage for
|
||||
# whisper files is solid state, CPU resources are very limited or deference to
|
||||
# the OS's i/o scheduler is expected to compensate for the random write
|
||||
# pattern.
|
||||
#
|
||||
CACHE_WRITE_STRATEGY = sorted
|
||||
|
||||
# On some systems it is desirable for whisper to write synchronously.
|
||||
# Set this option to True if you'd like to try this. Basically it will
|
||||
# shift the onus of buffering writes from the kernel into carbon's cache.
|
||||
WHISPER_AUTOFLUSH = False
|
||||
|
||||
# By default new Whisper files are created pre-allocated with the data region
|
||||
# filled with zeros to prevent fragmentation and speed up contiguous reads and
|
||||
# writes (which are common). Enabling this option will cause Whisper to create
|
||||
# the file sparsely instead. Enabling this option may allow a large increase of
|
||||
# MAX_CREATES_PER_MINUTE but may have longer term performance implications
|
||||
# depending on the underlying storage configuration.
|
||||
# WHISPER_SPARSE_CREATE = False
|
||||
|
||||
# Only beneficial on linux filesystems that support the fallocate system call.
|
||||
# It maintains the benefits of contiguous reads/writes, but with a potentially
|
||||
# much faster creation speed, by allowing the kernel to handle the block
|
||||
# allocation and zero-ing. Enabling this option may allow a large increase of
|
||||
# MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported
|
||||
# this option will gracefully fallback to standard POSIX file access methods.
|
||||
WHISPER_FALLOCATE_CREATE = True
|
||||
|
||||
# Enabling this option will cause Whisper to lock each Whisper file it writes
|
||||
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
|
||||
# multiple carbon-cache daemons are writing to the same files
|
||||
# WHISPER_LOCK_WRITES = False
|
||||
|
||||
# Set this to True to enable whitelisting and blacklisting of metrics in
|
||||
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
|
||||
# empty, all metrics will pass through
|
||||
# USE_WHITELIST = False
|
||||
|
||||
# By default, carbon itself will log statistics (such as a count,
|
||||
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
|
||||
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
|
||||
# CARBON_METRIC_PREFIX = carbon
|
||||
# CARBON_METRIC_INTERVAL = 60
|
||||
|
||||
# Enable AMQP if you want to receive metrics using an amqp broker
|
||||
# ENABLE_AMQP = False
|
||||
|
||||
# Verbose means a line will be logged for every metric received
|
||||
# useful for testing
|
||||
# AMQP_VERBOSE = False
|
||||
|
||||
# AMQP_HOST = localhost
|
||||
# AMQP_PORT = 5672
|
||||
# AMQP_VHOST = /
|
||||
# AMQP_USER = guest
|
||||
# AMQP_PASSWORD = guest
|
||||
# AMQP_EXCHANGE = graphite
|
||||
# AMQP_METRIC_NAME_IN_BODY = False
|
||||
|
||||
# The manhole interface allows you to SSH into the carbon daemon
|
||||
# and get a python interpreter. BE CAREFUL WITH THIS! If you do
|
||||
# something like time.sleep() in the interpreter, the whole process
|
||||
# will sleep! This is *extremely* helpful in debugging, assuming
|
||||
# you are familiar with the code. If you are not, please don't
|
||||
# mess with this, you are asking for trouble :)
|
||||
#
|
||||
# ENABLE_MANHOLE = False
|
||||
# MANHOLE_INTERFACE = 127.0.0.1
|
||||
# MANHOLE_PORT = 7222
|
||||
# MANHOLE_USER = admin
|
||||
# MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE=
|
||||
|
||||
# Patterns for all of the metrics this machine will store. Read more at
|
||||
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
|
||||
#
|
||||
# Example: store all sales, linux servers, and utilization metrics
|
||||
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
|
||||
#
|
||||
# Example: store everything
|
||||
# BIND_PATTERNS = #
|
||||
|
||||
# To configure special settings for the carbon-cache instance 'b', uncomment this:
|
||||
#[cache:b]
|
||||
#LINE_RECEIVER_PORT = 2103
|
||||
#PICKLE_RECEIVER_PORT = 2104
|
||||
#CACHE_QUERY_PORT = 7102
|
||||
# and any other settings you want to customize, defaults are inherited
|
||||
# from [carbon] section.
|
||||
# You can then specify the --instance=b option to manage this instance
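#
# (Editor's sketch, not part of the shipped file.) With two cache instances the
# relay's DESTINATIONS and the webapp's CARBONLINK_HOSTS must list the same
# instances, e.g.:
#   DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b             (carbon.conf [relay])
#   CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b"]   (local_settings.py)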
|
||||
|
||||
|
||||
|
||||
[relay]
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2013
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2014
|
||||
|
||||
# Set to false to disable logging of successful connections
|
||||
LOG_LISTENER_CONNECTIONS = True
|
||||
|
||||
# Carbon-relay has several options for metric routing controlled by RELAY_METHOD
|
||||
#
|
||||
# Use relay-rules.conf to route metrics to destinations based on pattern rules
|
||||
#RELAY_METHOD = rules
|
||||
#
|
||||
# Use consistent-hashing for even distribution of metrics between destinations
|
||||
#RELAY_METHOD = consistent-hashing
|
||||
#
|
||||
# Use consistent-hashing but take into account an aggregation-rules.conf shared
|
||||
# by downstream carbon-aggregator daemons. This will ensure that all metrics
|
||||
# that map to a given aggregation rule are sent to the same carbon-aggregator
|
||||
# instance.
|
||||
# Enable this for carbon-relays that send to a group of carbon-aggregators
|
||||
#RELAY_METHOD = aggregated-consistent-hashing
|
||||
RELAY_METHOD = rules
|
||||
|
||||
# If you use consistent-hashing you can add redundancy by replicating every
|
||||
# datapoint to more than one machine.
|
||||
REPLICATION_FACTOR = 1
|
||||
|
||||
# This is a list of carbon daemons we will send any relayed or
|
||||
# generated metrics to. The default provided would send to a single
|
||||
# carbon-cache instance on the default port. However if you
|
||||
# use multiple carbon-cache instances then it would look like this:
|
||||
#
|
||||
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
|
||||
#
|
||||
# The general form is IP:PORT:INSTANCE where the :INSTANCE part is
|
||||
# optional and refers to the "None" instance if omitted.
|
||||
#
|
||||
# Note that if the destinations are all carbon-caches then this should
|
||||
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
|
||||
# instances listed (order matters!).
|
||||
#
|
||||
# If using RELAY_METHOD = rules, all destinations used in relay-rules.conf
|
||||
# must be defined in this list
|
||||
DESTINATIONS = 127.0.0.1:2004
|
||||
|
||||
# This defines the maximum "message size" between carbon daemons.
|
||||
# You shouldn't need to tune this unless you really know what you're doing.
|
||||
MAX_DATAPOINTS_PER_MESSAGE = 500
|
||||
MAX_QUEUE_SIZE = 10000
|
||||
|
||||
# Set this to False to drop datapoints when any send queue (sending datapoints
|
||||
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
|
||||
# default) then sockets over which metrics are received will temporarily stop accepting
|
||||
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
|
||||
USE_FLOW_CONTROL = True
|
||||
|
||||
# Set this to True to enable whitelisting and blacklisting of metrics in
|
||||
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
|
||||
# empty, all metrics will pass through
|
||||
# USE_WHITELIST = False
|
||||
|
||||
# By default, carbon itself will log statistics (such as a count,
|
||||
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
|
||||
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
|
||||
# CARBON_METRIC_PREFIX = carbon
|
||||
# CARBON_METRIC_INTERVAL = 60
|
||||
|
||||
|
||||
[aggregator]
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2023
|
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2024
|
||||
|
||||
# Set to false to disable logging of successful connections
|
||||
LOG_LISTENER_CONNECTIONS = True
|
||||
|
||||
# If set true, metrics received will be forwarded to DESTINATIONS in addition to
|
||||
# the output of the aggregation rules. If set false the carbon-aggregator will
|
||||
# only ever send the output of aggregation.
|
||||
FORWARD_ALL = True
|
||||
|
||||
# This is a list of carbon daemons we will send any relayed or
|
||||
# generated metrics to. The default provided would send to a single
|
||||
# carbon-cache instance on the default port. However if you
|
||||
# use multiple carbon-cache instances then it would look like this:
|
||||
#
|
||||
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
|
||||
#
|
||||
# The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is
|
||||
# optional and refers to the "None" instance if omitted.
|
||||
#
|
||||
# Note that if the destinations are all carbon-caches then this should
|
||||
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
|
||||
# instances listed (order matters!).
|
||||
DESTINATIONS = 127.0.0.1:2004
|
||||
|
||||
# If you want to add redundancy to your data by replicating every
|
||||
# datapoint to more than one machine, increase this.
|
||||
REPLICATION_FACTOR = 1
|
||||
|
||||
# This is the maximum number of datapoints that can be queued up
|
||||
# for a single destination. Once this limit is hit, we will
|
||||
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
|
||||
# we will drop any subsequently received datapoints.
|
||||
MAX_QUEUE_SIZE = 10000
|
||||
|
||||
# Set this to False to drop datapoints when any send queue (sending datapoints
|
||||
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
|
||||
# default) then sockets over which metrics are received will temporarily stop accepting
|
||||
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
|
||||
USE_FLOW_CONTROL = True
|
||||
|
||||
# This defines the maximum "message size" between carbon daemons.
|
||||
# You shouldn't need to tune this unless you really know what you're doing.
|
||||
MAX_DATAPOINTS_PER_MESSAGE = 500
|
||||
|
||||
# This defines how many datapoints the aggregator remembers for
|
||||
# each metric. Aggregation only happens for datapoints that fall in
|
||||
# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
|
||||
MAX_AGGREGATION_INTERVALS = 5
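# (Editor's illustration; aggregation-rules.conf is not included in this block.)
# Rules in that file have the form "output_template (frequency) = method input_pattern",
# for example:
#   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests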
|
||||
|
||||
# By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back
|
||||
# aggregated data points once every rule.frequency seconds, on a per-rule basis.
|
||||
# Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points
|
||||
# every N seconds, independent of rule frequency. This is useful, for example,
|
||||
# to be able to query partially aggregated metrics from carbon-cache without
|
||||
# having to first wait rule.frequency seconds.
|
||||
# WRITE_BACK_FREQUENCY = 0
|
||||
|
||||
# Set this to True to enable whitelisting and blacklisting of metrics in
|
||||
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
|
||||
# empty, all metrics will pass through
|
||||
# USE_WHITELIST = False
|
||||
|
||||
# By default, carbon itself will log statistics (such as a count,
|
||||
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
|
||||
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
|
||||
# CARBON_METRIC_PREFIX = carbon
|
||||
# CARBON_METRIC_INTERVAL = 60
|
||||
@@ -0,0 +1,57 @@
|
||||
# This configuration file controls the behavior of the Dashboard UI, available
|
||||
# at http://my-graphite-server/dashboard/.
|
||||
#
|
||||
# This file must contain a [ui] section that defines values for all of the
|
||||
# following settings.
|
||||
[ui]
|
||||
default_graph_width = 400
|
||||
default_graph_height = 250
|
||||
automatic_variants = true
|
||||
refresh_interval = 60
|
||||
autocomplete_delay = 375
|
||||
merge_hover_delay = 750
|
||||
|
||||
# You can set this 'default', 'white', or a custom theme name.
|
||||
# To create a custom theme, copy the dashboard-default.css file
|
||||
# to dashboard-myThemeName.css in the content/css directory and
|
||||
# modify it to your liking.
|
||||
theme = default
|
||||
|
||||
[keyboard-shortcuts]
|
||||
toggle_toolbar = ctrl-z
|
||||
toggle_metrics_panel = ctrl-space
|
||||
erase_all_graphs = alt-x
|
||||
save_dashboard = alt-s
|
||||
completer_add_metrics = alt-enter
|
||||
completer_del_metrics = alt-backspace
|
||||
give_completer_focus = shift-space
|
||||
|
||||
# These settings apply to the UI as a whole, all other sections in this file
|
||||
# pertain only to specific metric types.
|
||||
#
|
||||
# The dashboard presents only metrics that fall into specified naming schemes
|
||||
# defined in this file. This creates a simpler, more targeted view of the
|
||||
# data. The general form for defining a naming scheme is as follows:
|
||||
#
|
||||
#[Metric Type]
|
||||
#scheme = basis.path.<field1>.<field2>.<fieldN>
|
||||
#field1.label = Foo
|
||||
#field2.label = Bar
|
||||
#
|
||||
#
|
||||
# Where each <field> will be displayed as a dropdown box
|
||||
# in the UI and the remaining portion of the namespace
|
||||
# shown in the Metric Selector panel. The .label options set the labels
|
||||
# displayed for each dropdown.
|
||||
#
|
||||
# For example:
|
||||
#
|
||||
#[Sales]
|
||||
#scheme = sales.<channel>.<type>.<brand>
|
||||
#channel.label = Channel
|
||||
#type.label = Product Type
|
||||
#brand.label = Brand
|
||||
#
|
||||
# This defines a 'Sales' metric type that uses 3 dropdowns in the Context Selector
|
||||
# (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc)
|
||||
# will be available in the Metric Selector (upper-right panel).
|
||||
@@ -0,0 +1,38 @@
|
||||
[default]
|
||||
background = black
|
||||
foreground = white
|
||||
majorLine = white
|
||||
minorLine = grey
|
||||
lineColors = blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose
|
||||
fontName = Sans
|
||||
fontSize = 10
|
||||
fontBold = False
|
||||
fontItalic = False
|
||||
|
||||
[noc]
|
||||
background = black
|
||||
foreground = white
|
||||
majorLine = white
|
||||
minorLine = grey
|
||||
lineColors = blue,green,red,yellow,purple,brown,aqua,grey,magenta,pink,gold,rose
|
||||
fontName = Sans
|
||||
fontSize = 10
|
||||
fontBold = False
|
||||
fontItalic = False
|
||||
|
||||
[plain]
|
||||
background = white
|
||||
foreground = black
|
||||
minorLine = grey
|
||||
majorLine = rose
|
||||
|
||||
[summary]
|
||||
background = black
|
||||
lineColors = #6666ff, #66ff66, #ff6666
|
||||
|
||||
[alphas]
|
||||
background = white
|
||||
foreground = black
|
||||
majorLine = grey
|
||||
minorLine = rose
|
||||
lineColors = 00ff00aa,ff000077,00337799
|
||||
@@ -0,0 +1,21 @@
|
||||
# Relay destination rules for carbon-relay. Entries are scanned in order,
|
||||
# and the first pattern a metric matches will cause processing to cease after sending
|
||||
# unless `continue` is set to true
|
||||
#
|
||||
# [name]
|
||||
# pattern = <regex>
|
||||
# destinations = <list of destination addresses>
|
||||
# continue = <boolean> # default: False
|
||||
#
|
||||
# name: Arbitrary unique name to identify the rule
|
||||
# pattern: Regex pattern to match against the metric name
|
||||
# destinations: Comma-separated list of destinations.
|
||||
# ex: 127.0.0.1, 10.1.2.3:2004, 10.1.2.4:2004:a, myserver.mydomain.com
|
||||
# continue: Continue processing rules if this rule matches (default: False)
|
||||
|
||||
# You must have exactly one section with 'default = true'
|
||||
# Note that all destinations listed must also exist in carbon.conf
|
||||
# in the DESTINATIONS setting in the [relay] section
|
||||
[default]
|
||||
default = true
|
||||
destinations = 127.0.0.1:2004:a, 127.0.0.1:2104:b
|
||||
@@ -0,0 +1,18 @@
|
||||
# This file defines regular expression patterns that can be used to
|
||||
# rewrite metric names in a search & replace fashion. It consists of two
|
||||
# sections, [pre] and [post]. The rules in the pre section are applied to
|
||||
# metric names as soon as they are received. The post rules are applied
|
||||
# after aggregation has taken place.
|
||||
#
|
||||
# The general form of each rule is as follows:
|
||||
#
|
||||
# regex-pattern = replacement-text
|
||||
#
|
||||
# For example:
|
||||
#
|
||||
# [post]
|
||||
# _sum$ =
|
||||
# _avg$ =
|
||||
#
|
||||
# These rules would strip off a suffix of _sum or _avg from any metric names
|
||||
# after aggregation.
|
||||
@@ -0,0 +1,43 @@
|
||||
# Aggregation methods for whisper files. Entries are scanned in order,
|
||||
# and first match wins. This file is scanned for changes every 60 seconds
|
||||
#
|
||||
# [name]
|
||||
# pattern = <regex>
|
||||
# xFilesFactor = <float between 0 and 1>
|
||||
# aggregationMethod = <average|sum|last|max|min>
|
||||
#
|
||||
# name: Arbitrary unique name for the rule
|
||||
# pattern: Regex pattern to match against the metric name
|
||||
# xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
|
||||
# aggregationMethod: function to apply to data points for aggregation
|
||||
#
|
||||
[min]
|
||||
pattern = \.lower$
|
||||
xFilesFactor = 0.1
|
||||
aggregationMethod = min
|
||||
|
||||
[max]
|
||||
pattern = \.upper(_\d+)?$
|
||||
xFilesFactor = 0.1
|
||||
aggregationMethod = max
|
||||
|
||||
[sum]
|
||||
pattern = \.sum$
|
||||
xFilesFactor = 0
|
||||
aggregationMethod = sum
|
||||
|
||||
[count]
|
||||
pattern = \.count$
|
||||
xFilesFactor = 0
|
||||
aggregationMethod = sum
|
||||
|
||||
[count_legacy]
|
||||
pattern = ^stats_counts.*
|
||||
xFilesFactor = 0
|
||||
aggregationMethod = sum
|
||||
|
||||
[default_average]
|
||||
pattern = .*
|
||||
xFilesFactor = 0.3
|
||||
aggregationMethod = average
|
||||
|
||||
@@ -0,0 +1,17 @@
|
||||
# Schema definitions for Whisper files. Entries are scanned in order, and the first match wins.
|
||||
[carbon]
|
||||
pattern = ^carbon\..*
|
||||
retentions = 1m:31d,10m:1y,1h:5y
|
||||
|
||||
[highres]
|
||||
pattern = ^highres.*
|
||||
retentions = 1s:1d,1m:7d
|
||||
|
||||
[statsd]
|
||||
pattern = ^statsd.*
|
||||
retentions = 1m:7d,10m:1y
|
||||
|
||||
[default]
|
||||
pattern = .*
|
||||
retentions = 10s:1d,1m:7d,10m:1y
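# (Editor's note) Each retention is <precision>:<duration>. The default above,
# 10s:1d,1m:7d,10m:1y, keeps 10-second points for 1 day, 1-minute points for
# 7 days and 10-minute points for 1 year, downsampling older data according to
# storage-aggregation.conf.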
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
# This file takes a single regular expression per line
|
||||
# If USE_WHITELIST is set to True in carbon.conf, only metrics received which
|
||||
# match one of these expressions will be persisted. If this file is empty or
|
||||
# missing, all metrics will pass through.
|
||||
# This file is reloaded automatically when changes are made
|
||||
.*
|
||||
@@ -0,0 +1,94 @@
|
||||
"""Copyright 2008 Orbitz WorldWide
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License."""
|
||||
|
||||
# Django settings for graphite project.
|
||||
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
|
||||
from os.path import dirname, join, abspath
|
||||
|
||||
|
||||
#Django settings below, do not touch!
|
||||
APPEND_SLASH = False
|
||||
TEMPLATE_DEBUG = False
|
||||
|
||||
TEMPLATES = [
|
||||
{
|
||||
'BACKEND': 'django.template.backends.django.DjangoTemplates',
|
||||
'DIRS': [
|
||||
join(dirname( abspath(__file__) ), 'templates')
|
||||
],
|
||||
'APP_DIRS': True,
|
||||
'OPTIONS': {
|
||||
'context_processors': [
|
||||
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
|
||||
# list if you haven't customized them:
|
||||
'django.contrib.auth.context_processors.auth',
|
||||
'django.template.context_processors.debug',
|
||||
'django.template.context_processors.i18n',
|
||||
'django.template.context_processors.media',
|
||||
'django.template.context_processors.static',
|
||||
'django.template.context_processors.tz',
|
||||
'django.contrib.messages.context_processors.messages',
|
||||
],
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
# Language code for this installation. All choices can be found here:
|
||||
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
|
||||
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
|
||||
LANGUAGE_CODE = 'en-us'
|
||||
|
||||
# Absolute path to the directory that holds media.
|
||||
MEDIA_ROOT = ''
|
||||
|
||||
# URL that handles the media served from MEDIA_ROOT.
|
||||
# Example: "http://media.lawrence.com"
|
||||
MEDIA_URL = ''
|
||||
|
||||
MIDDLEWARE_CLASSES = (
|
||||
'graphite.middleware.LogExceptionsMiddleware',
|
||||
'django.middleware.common.CommonMiddleware',
|
||||
'django.middleware.gzip.GZipMiddleware',
|
||||
'django.contrib.sessions.middleware.SessionMiddleware',
|
||||
'django.contrib.auth.middleware.AuthenticationMiddleware',
|
||||
'django.contrib.messages.middleware.MessageMiddleware',
|
||||
)
|
||||
|
||||
ROOT_URLCONF = 'graphite.urls'
|
||||
|
||||
INSTALLED_APPS = (
|
||||
'graphite.metrics',
|
||||
'graphite.render',
|
||||
'graphite.browser',
|
||||
'graphite.composer',
|
||||
'graphite.account',
|
||||
'graphite.dashboard',
|
||||
'graphite.whitelist',
|
||||
'graphite.events',
|
||||
'graphite.url_shortener',
|
||||
'django.contrib.auth',
|
||||
'django.contrib.sessions',
|
||||
'django.contrib.admin',
|
||||
'django.contrib.contenttypes',
|
||||
'django.contrib.staticfiles',
|
||||
'tagging',
|
||||
)
|
||||
|
||||
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
|
||||
|
||||
GRAPHITE_WEB_APP_SETTINGS_LOADED = True
|
||||
|
||||
STATIC_URL = '/static/'
|
||||
|
||||
STATIC_ROOT = '/opt/graphite/static/'
|
||||
@@ -0,0 +1,215 @@
|
||||
## Graphite local_settings.py
|
||||
# Edit this file to customize the default Graphite webapp settings
|
||||
#
|
||||
# Additional customizations to Django settings can be added to this file as well
|
||||
|
||||
#####################################
|
||||
# General Configuration #
|
||||
#####################################
|
||||
# Set this to a long, random unique string to use as a secret key for this
|
||||
# install. This key is used for salting of hashes used in auth tokens,
|
||||
# CSRF middleware, cookie storage, etc. This should be set identically among
|
||||
# instances if used behind a load balancer.
|
||||
#SECRET_KEY = 'UNSAFE_DEFAULT'
|
||||
|
||||
# In Django 1.5+ set this to the list of hosts your graphite instance is
|
||||
# accessible as. See:
|
||||
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
|
||||
#ALLOWED_HOSTS = [ '*' ]
|
||||
|
||||
# Set your local timezone (Django's default is America/Chicago)
|
||||
# If your graphs appear to be offset by a couple hours then this probably
|
||||
# needs to be explicitly set to your local timezone.
|
||||
#TIME_ZONE = 'America/Los_Angeles'
|
||||
|
||||
# Override this to provide documentation specific to your Graphite deployment
|
||||
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
|
||||
|
||||
# Logging
|
||||
#LOG_RENDERING_PERFORMANCE = True
|
||||
#LOG_CACHE_PERFORMANCE = True
|
||||
#LOG_METRIC_ACCESS = True
|
||||
|
||||
# Enable full debug page display on exceptions (Internal Server Error pages)
|
||||
#DEBUG = True
|
||||
|
||||
# If using RRD files and rrdcached, set to the address or socket of the daemon
|
||||
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
|
||||
|
||||
# This lists the memcached servers that will be used by this webapp.
|
||||
# If you have a cluster of webapps you should ensure all of them
|
||||
# have the *exact* same value for this setting. That will maximize cache
|
||||
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
|
||||
# memcached entirely.
|
||||
#
|
||||
# You should not use the loopback address (127.0.0.1) here if using clustering
|
||||
# as every webapp in the cluster should use the exact same values to prevent
|
||||
# unneeded cache misses. Set to [] to disable caching of images and fetched data
|
||||
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
|
||||
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute
|
||||
|
||||
|
||||
#####################################
|
||||
# Filesystem Paths #
|
||||
#####################################
|
||||
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
|
||||
# to somewhere else
|
||||
#GRAPHITE_ROOT = '/opt/graphite'
|
||||
|
||||
# Most installs done outside of a separate tree such as /opt/graphite will only
|
||||
# need to change these three settings. Note that the default settings for each
|
||||
# of these is relative to GRAPHITE_ROOT
|
||||
#CONF_DIR = '/opt/graphite/conf'
|
||||
#STORAGE_DIR = '/opt/graphite/storage'
|
||||
#CONTENT_DIR = '/opt/graphite/webapp/content'
|
||||
|
||||
# To further or fully customize the paths, modify the following. Note that the
|
||||
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
|
||||
#
|
||||
## Webapp config files
|
||||
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
|
||||
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'
|
||||
|
||||
## Data directories
|
||||
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
|
||||
#WHISPER_DIR = '/opt/graphite/storage/whisper'
|
||||
#RRD_DIR = '/opt/graphite/storage/rrd'
|
||||
#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
|
||||
#LOG_DIR = '/opt/graphite/storage/log/webapp'
|
||||
#INDEX_FILE = '/opt/graphite/storage/index' # Search index file
|
||||
|
||||
|
||||
#####################################
|
||||
# Email Configuration #
|
||||
#####################################
|
||||
# This is used for emailing rendered Graphs
|
||||
# Default backend is SMTP
|
||||
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
|
||||
#EMAIL_HOST = 'localhost'
|
||||
#EMAIL_PORT = 25
|
||||
#EMAIL_HOST_USER = ''
|
||||
#EMAIL_HOST_PASSWORD = ''
|
||||
#EMAIL_USE_TLS = False
|
||||
# To drop emails on the floor, enable the Dummy backend:
|
||||
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
|
||||
|
||||
|
||||
#####################################
|
||||
# Authentication Configuration #
|
||||
#####################################
|
||||
## LDAP / ActiveDirectory authentication setup
|
||||
#USE_LDAP_AUTH = True
|
||||
#LDAP_SERVER = "ldap.mycompany.com"
|
||||
#LDAP_PORT = 389
|
||||
# OR
|
||||
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
|
||||
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_PASS = "readonly_account_password"
|
||||
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
|
||||
#
|
||||
# If you want to further customize the ldap connection options you should
|
||||
# directly use ldap.set_option to set the ldap module's global options.
|
||||
# For example:
|
||||
#
|
||||
#import ldap
|
||||
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
|
||||
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
|
||||
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
|
||||
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
|
||||
# See http://www.python-ldap.org/ for further details on these options.
|
||||
|
||||
## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
|
||||
#USE_REMOTE_USER_AUTHENTICATION = True
|
||||
|
||||
# Override the URL for the login link (e.g. for django_openid_auth)
|
||||
#LOGIN_URL = '/account/login'
|
||||
|
||||
|
||||
##########################
|
||||
# Database Configuration #
|
||||
##########################
|
||||
# By default sqlite is used. If you cluster multiple webapps you will need
|
||||
# to setup an external database (such as MySQL) and configure all of the webapp
|
||||
# instances to use the same database. Note that this database is only used to store
|
||||
# Django models such as saved graphs, dashboards, user preferences, etc.
|
||||
# Metric data is not stored here.
|
||||
#
|
||||
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
|
||||
#
|
||||
# The following built-in database engines are available:
|
||||
# django.db.backends.postgresql # Removed in Django 1.4
|
||||
# django.db.backends.postgresql_psycopg2
|
||||
# django.db.backends.mysql
|
||||
# django.db.backends.sqlite3
|
||||
# django.db.backends.oracle
|
||||
#
|
||||
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
|
||||
# located in STORAGE_DIR
|
||||
#
|
||||
#DATABASES = {
|
||||
# 'default': {
|
||||
# 'NAME': '/opt/graphite/storage/graphite.db',
|
||||
# 'ENGINE': 'django.db.backends.sqlite3',
|
||||
# 'USER': '',
|
||||
# 'PASSWORD': '',
|
||||
# 'HOST': '',
|
||||
# 'PORT': ''
|
||||
# }
|
||||
#}
|
||||
#
|
||||
|
||||
|
||||
#########################
|
||||
# Cluster Configuration #
|
||||
#########################
|
||||
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
|
||||
#
|
||||
# This should list the IP address (and optionally port) of the webapp on each
|
||||
# remote server in the cluster. These servers must each have local access to
|
||||
# metric data. Note that the first server to return a match for a query will be
|
||||
# used.
|
||||
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]
|
||||
|
||||
## These are timeout values (in seconds) for requests to remote webapps
|
||||
#REMOTE_STORE_FETCH_TIMEOUT = 6 # Timeout to fetch series data
|
||||
#REMOTE_STORE_FIND_TIMEOUT = 2.5 # Timeout for metric find requests
|
||||
#REMOTE_STORE_RETRY_DELAY = 60 # Time before retrying a failed remote webapp
|
||||
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results
|
||||
|
||||
## Remote rendering settings
|
||||
# Set to True to enable rendering of Graphs on a remote webapp
|
||||
#REMOTE_RENDERING = True
|
||||
# List of IP (and optionally port) of the webapp on each remote server that
|
||||
# will be used for rendering. Note that each rendering host should have local
|
||||
# access to metric data or should have CLUSTER_SERVERS configured
|
||||
#RENDERING_HOSTS = []
|
||||
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
|
||||
|
||||
# If you are running multiple carbon-caches on this machine (typically behind a relay using
|
||||
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
|
||||
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
|
||||
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
|
||||
#
|
||||
# You *should* use 127.0.0.1 here in most cases
|
||||
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
|
||||
#CARBONLINK_TIMEOUT = 1.0
|
||||
|
||||
#####################################
|
||||
# Additional Django Settings #
|
||||
#####################################
|
||||
# Uncomment the following line for direct access to Django settings such as
|
||||
# MIDDLEWARE_CLASSES or APPS
|
||||
#from graphite.app_settings import *
|
||||
|
||||
import os
|
||||
|
||||
LOG_DIR = '/var/log/graphite'
|
||||
SECRET_KEY = '$(date +%s | sha256sum | base64 | head -c 64)'
|
||||
|
||||
if (os.getenv("MEMCACHE_HOST") is not None):
|
||||
MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOST").split(",")
|
||||
|
||||
if (os.getenv("DEFAULT_CACHE_DURATION") is not None):
|
||||
DEFAULT_CACHE_DURATION = int(os.getenv("DEFAULT_CACHE_DURATION"))
|
||||
|
||||
6
docker/blocks/graphite1/conf/opt/statsd/config.js
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"graphiteHost": "127.0.0.1",
|
||||
"graphitePort": 2003,
|
||||
"port": 8125,
|
||||
"flushInterval": 10000
|
||||
}
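This statsd instance listens for metrics on UDP port 8125 and flushes them to the local carbon line receiver every 10 seconds. A quick smoke test could look like the line below (a sketch: it assumes `nc` is available and is run inside the container, e.g. via `docker exec`, since the fig file for this block only publishes ports 80 and 2003):

```
echo "test.statsd.smoke:1|c" | nc -u -w1 127.0.0.1 8125
```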
|
||||
26
docker/blocks/graphite1/conf/usr/local/bin/django_admin_init.exp
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env expect
|
||||
|
||||
set timeout -1
|
||||
spawn /usr/local/bin/manage.sh
|
||||
|
||||
expect "Would you like to create one now" {
|
||||
send "yes\r"
|
||||
}
|
||||
|
||||
expect "Username" {
|
||||
send "root\r"
|
||||
}
|
||||
|
||||
expect "Email address:" {
|
||||
send "root.graphite@mailinator.com\r"
|
||||
}
|
||||
|
||||
expect "Password:" {
|
||||
send "root\r"
|
||||
}
|
||||
|
||||
expect "Password *:" {
|
||||
send "root\r"
|
||||
}
|
||||
|
||||
expect "Superuser created successfully"
|
||||
3
docker/blocks/graphite1/conf/usr/local/bin/manage.sh
Normal file
@@ -0,0 +1,3 @@
|
||||
#!/bin/bash
|
||||
PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings
|
||||
PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
|
||||
16
docker/blocks/graphite1/fig
Normal file
@@ -0,0 +1,16 @@
|
||||
graphite:
|
||||
build: blocks/graphite1
|
||||
ports:
|
||||
- "8080:80"
|
||||
- "2003:2003"
|
||||
volumes:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
|
||||
fake-graphite-data:
|
||||
image: grafana/fake-data-gen
|
||||
net: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: graphite
|
||||
FD_PORT: 2003
|
||||
|
||||
76
docker/blocks/graphite1/files/carbon.conf
Normal file
@@ -0,0 +1,76 @@
|
||||
[cache]
|
||||
LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
|
||||
|
||||
# Specify the user to drop privileges to
|
||||
# If this is blank carbon runs as the user that invokes it
|
||||
# This user must have write access to the local data directory
|
||||
USER =
|
||||
|
||||
# Limit the size of the cache to avoid swapping or becoming CPU bound.
|
||||
# Sorting and serving cache queries gets more expensive as the cache grows.
|
||||
# Use the value "inf" (infinity) for an unlimited cache size.
|
||||
MAX_CACHE_SIZE = inf
|
||||
|
||||
# Limits the number of whisper update_many() calls per second, which effectively
|
||||
# means the number of write requests sent to the disk. This is intended to
|
||||
# prevent over-utilizing the disk and thus starving the rest of the system.
|
||||
# When the rate of required updates exceeds this, then carbon's caching will
|
||||
# take effect and increase the overall throughput accordingly.
|
||||
MAX_UPDATES_PER_SECOND = 1000
|
||||
|
||||
# Softly limits the number of whisper files that get created each minute.
|
||||
# Setting this value low (like at 50) is a good way to ensure your graphite
|
||||
# system will not be adversely impacted when a bunch of new metrics are
|
||||
# sent to it. The trade off is that it will take much longer for those metrics'
|
||||
# database files to all get created and thus longer until the data becomes usable.
|
||||
# Setting this value high (like "inf" for infinity) will cause graphite to create
|
||||
# the files quickly but at the risk of slowing I/O down considerably for a while.
|
||||
MAX_CREATES_PER_MINUTE = inf
|
||||
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2003
|
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2004
|
||||
|
||||
CACHE_QUERY_INTERFACE = 0.0.0.0
|
||||
CACHE_QUERY_PORT = 7002
|
||||
|
||||
LOG_UPDATES = False
|
||||
|
||||
# Enable AMQP if you want to receive metrics using an amqp broker
|
||||
# ENABLE_AMQP = False
|
||||
|
||||
# Verbose means a line will be logged for every metric received
|
||||
# useful for testing
|
||||
# AMQP_VERBOSE = False
|
||||
|
||||
# AMQP_HOST = localhost
|
||||
# AMQP_PORT = 5672
|
||||
# AMQP_VHOST = /
|
||||
# AMQP_USER = guest
|
||||
# AMQP_PASSWORD = guest
|
||||
# AMQP_EXCHANGE = graphite
|
||||
|
||||
# Patterns for all of the metrics this machine will store. Read more at
|
||||
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
|
||||
#
|
||||
# Example: store all sales, linux servers, and utilization metrics
|
||||
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
|
||||
#
|
||||
# Example: store everything
|
||||
# BIND_PATTERNS = #
|
||||
|
||||
# NOTE: you cannot run both a cache and a relay on the same server
|
||||
# with the default configuration; you have to specify distinct
|
||||
# interfaces and ports for the listeners.
|
||||
|
||||
[relay]
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2003
|
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2004
|
||||
|
||||
CACHE_SERVERS = server1, server2, server3
|
||||
MAX_QUEUE_SIZE = 10000
|
||||
102
docker/blocks/graphite1/files/events_views.py
Normal file
@@ -0,0 +1,102 @@
|
||||
import datetime
|
||||
import time
|
||||
|
||||
from django.utils.timezone import get_current_timezone
|
||||
from django.core.urlresolvers import get_script_prefix
|
||||
from django.http import HttpResponse
|
||||
from django.shortcuts import render_to_response, get_object_or_404
|
||||
from pytz import timezone
|
||||
|
||||
from graphite.util import json
|
||||
from graphite.events import models
|
||||
from graphite.render.attime import parseATTime
|
||||
|
||||
|
||||
def to_timestamp(dt):
|
||||
return time.mktime(dt.timetuple())
|
||||
|
||||
|
||||
class EventEncoder(json.JSONEncoder):
|
||||
def default(self, obj):
|
||||
if isinstance(obj, datetime.datetime):
|
||||
return to_timestamp(obj)
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
def view_events(request):
|
||||
if request.method == "GET":
|
||||
context = { 'events' : fetch(request),
|
||||
'slash' : get_script_prefix()
|
||||
}
|
||||
return render_to_response("events.html", context)
|
||||
else:
|
||||
return post_event(request)
|
||||
|
||||
def detail(request, event_id):
|
||||
e = get_object_or_404(models.Event, pk=event_id)
|
||||
context = { 'event' : e,
|
||||
'slash' : get_script_prefix()
|
||||
}
|
||||
return render_to_response("event.html", context)
|
||||
|
||||
|
||||
def post_event(request):
|
||||
if request.method == 'POST':
|
||||
event = json.loads(request.body)
|
||||
assert isinstance(event, dict)
|
||||
|
||||
values = {}
|
||||
values["what"] = event["what"]
|
||||
values["tags"] = event.get("tags", None)
|
||||
values["when"] = datetime.datetime.fromtimestamp(
|
||||
event.get("when", time.time()))
|
||||
if "data" in event:
|
||||
values["data"] = event["data"]
|
||||
|
||||
e = models.Event(**values)
|
||||
e.save()
|
||||
|
||||
return HttpResponse(status=200)
|
||||
else:
|
||||
return HttpResponse(status=405)
|
||||
|
||||
def get_data(request):
|
||||
if 'jsonp' in request.REQUEST:
|
||||
response = HttpResponse(
|
||||
"%s(%s)" % (request.REQUEST.get('jsonp'),
|
||||
json.dumps(fetch(request), cls=EventEncoder)),
|
||||
mimetype='text/javascript')
|
||||
else:
|
||||
response = HttpResponse(
|
||||
json.dumps(fetch(request), cls=EventEncoder),
|
||||
mimetype="application/json")
|
||||
return response
|
||||
|
||||
def fetch(request):
|
||||
#XXX we need to move to USE_TZ=True to get rid of naive-time conversions
|
||||
def make_naive(dt):
|
||||
if 'tz' in request.GET:
|
||||
tz = timezone(request.GET['tz'])
|
||||
else:
|
||||
tz = get_current_timezone()
|
||||
local_dt = dt.astimezone(tz)
|
||||
if hasattr(local_dt, 'normalize'):
|
||||
local_dt = local_dt.normalize()
|
||||
return local_dt.replace(tzinfo=None)
|
||||
|
||||
if request.GET.get("from", None) is not None:
|
||||
time_from = make_naive(parseATTime(request.GET["from"]))
|
||||
else:
|
||||
time_from = datetime.datetime.fromtimestamp(0)
|
||||
|
||||
if request.GET.get("until", None) is not None:
|
||||
time_until = make_naive(parseATTime(request.GET["until"]))
|
||||
else:
|
||||
time_until = datetime.datetime.now()
|
||||
|
||||
tags = request.GET.get("tags", None)
|
||||
if tags is not None:
|
||||
tags = request.GET.get("tags").split(" ")
|
||||
|
||||
return [x.as_dict() for x in
|
||||
models.Event.find_events(time_from, time_until, tags=tags)]
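The `post_event` view above accepts a JSON body with `what` plus optional `tags`, `when` and `data` and stores it as a Graphite event. A hedged example of exercising it through the nginx proxy in this block (host port 8080 is assumed from the fig file):

```
curl -X POST http://localhost:8080/events/ \
  -d '{"what": "deployed grafana", "tags": "deploy", "data": "v4.3.0"}'
```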
|
||||
20
docker/blocks/graphite1/files/initial_data.json
Normal file
@@ -0,0 +1,20 @@
|
||||
[
|
||||
{
|
||||
"pk": 1,
|
||||
"model": "auth.user",
|
||||
"fields": {
|
||||
"username": "admin",
|
||||
"first_name": "",
|
||||
"last_name": "",
|
||||
"is_active": true,
|
||||
"is_superuser": true,
|
||||
"is_staff": true,
|
||||
"last_login": "2011-09-20 17:02:14",
|
||||
"groups": [],
|
||||
"user_permissions": [],
|
||||
"password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236",
|
||||
"email": "root@example.com",
|
||||
"date_joined": "2011-09-20 17:02:14"
|
||||
}
|
||||
}
|
||||
]
|
||||
42
docker/blocks/graphite1/files/local_settings.py
Normal file
@@ -0,0 +1,42 @@
|
||||
# Edit this file to override the default graphite settings, do not edit settings.py
|
||||
|
||||
# Turn on debugging and restart apache if you ever see an "Internal Server Error" page
|
||||
#DEBUG = True
|
||||
|
||||
# Set your local timezone (django will try to figure this out automatically)
|
||||
TIME_ZONE = 'UTC'
|
||||
|
||||
# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely
|
||||
#MEMCACHE_HOSTS = ['127.0.0.1:11211']
|
||||
|
||||
# Sometimes you need to do a lot of rendering work but cannot share your storage mount
|
||||
#REMOTE_RENDERING = True
|
||||
#RENDERING_HOSTS = ['fastserver01','fastserver02']
|
||||
#LOG_RENDERING_PERFORMANCE = True
|
||||
#LOG_CACHE_PERFORMANCE = True
|
||||
|
||||
# If you've got more than one backend server they should all be listed here
|
||||
#CLUSTER_SERVERS = []
|
||||
|
||||
# Override this if you need to provide documentation specific to your graphite deployment
|
||||
#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite"
|
||||
|
||||
# Enable email-related features
|
||||
#SMTP_SERVER = "mail.mycompany.com"
|
||||
|
||||
# LDAP / ActiveDirectory authentication setup
|
||||
#USE_LDAP_AUTH = True
|
||||
#LDAP_SERVER = "ldap.mycompany.com"
|
||||
#LDAP_PORT = 389
|
||||
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_PASS = "readonly_account_password"
|
||||
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
|
||||
|
||||
# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!)
|
||||
#DATABASE_ENGINE = 'mysql' # or 'postgres'
|
||||
#DATABASE_NAME = 'graphite'
|
||||
#DATABASE_USER = 'graphite'
|
||||
#DATABASE_PASSWORD = 'graphite-is-awesome'
|
||||
#DATABASE_HOST = 'mysql.mycompany.com'
|
||||
#DATABASE_PORT = '3306'
|
||||
1
docker/blocks/graphite1/files/my_htpasswd
Normal file
@@ -0,0 +1 @@
|
||||
grafana:$apr1$4R/20xhC$8t37jPP5dbcLr48btdkU//
|
||||
70
docker/blocks/graphite1/files/nginx.conf
Normal file
@@ -0,0 +1,70 @@
|
||||
daemon off;
|
||||
user www-data;
|
||||
worker_processes 1;
|
||||
pid /var/run/nginx.pid;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
server_tokens off;
|
||||
|
||||
server_names_hash_bucket_size 32;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
access_log /var/log/nginx/access.log;
|
||||
error_log /var/log/nginx/error.log;
|
||||
|
||||
gzip on;
|
||||
gzip_disable "msie6";
|
||||
|
||||
server {
|
||||
listen 80 default_server;
|
||||
server_name _;
|
||||
|
||||
open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:8000;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Server $host;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header Host $host;
|
||||
|
||||
client_max_body_size 10m;
|
||||
client_body_buffer_size 128k;
|
||||
|
||||
proxy_connect_timeout 90;
|
||||
proxy_send_timeout 90;
|
||||
proxy_read_timeout 90;
|
||||
|
||||
proxy_buffer_size 4k;
|
||||
proxy_buffers 4 32k;
|
||||
proxy_busy_buffers_size 64k;
|
||||
proxy_temp_file_write_size 64k;
|
||||
}
|
||||
|
||||
add_header Access-Control-Allow-Origin "*";
|
||||
add_header Access-Control-Allow-Methods "GET, OPTIONS";
|
||||
add_header Access-Control-Allow-Headers "origin, authorization, accept";
|
||||
|
||||
location /content {
|
||||
alias /opt/graphite/webapp/content;
|
||||
|
||||
}
|
||||
|
||||
location /media {
|
||||
alias /usr/share/pyshared/django/contrib/admin/media;
|
||||
}
|
||||
}
|
||||
}
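Everything under `/` is proxied to the graphite-web gunicorn process on port 8000, so the render API is reachable through nginx. A hedged smoke test from the host (port 8080 is assumed from the fig file, and the target assumes carbon's self-instrumentation metrics are being written):

```
curl "http://localhost:8080/render?target=carbon.agents.*.metricsReceived&from=-10min&format=json"
```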
|
||||
8
docker/blocks/graphite1/files/statsd_config.js
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
graphitePort: 2003,
|
||||
graphiteHost: "127.0.0.1",
|
||||
port: 8125,
|
||||
mgmt_port: 8126,
|
||||
backends: ['./backends/graphite'],
|
||||
debug: true
|
||||
}
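This variant also exposes statsd's TCP management interface on port 8126, which answers simple text commands such as `stats` and `counters`. A hedged example, again run from inside the container since this port is not published by the fig file:

```
echo "counters" | nc -w1 127.0.0.1 8126
```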
|
||||
19
docker/blocks/graphite1/files/storage-aggregation.conf
Normal file
@@ -0,0 +1,19 @@
|
||||
[min]
|
||||
pattern = \.min$
|
||||
xFilesFactor = 0.1
|
||||
aggregationMethod = min
|
||||
|
||||
[max]
|
||||
pattern = \.max$
|
||||
xFilesFactor = 0.1
|
||||
aggregationMethod = max
|
||||
|
||||
[sum]
|
||||
pattern = \.count$
|
||||
xFilesFactor = 0
|
||||
aggregationMethod = sum
|
||||
|
||||
[default_average]
|
||||
pattern = .*
|
||||
xFilesFactor = 0.5
|
||||
aggregationMethod = average
|
||||
16
docker/blocks/graphite1/files/storage-schemas.conf
Normal file
@@ -0,0 +1,16 @@
|
||||
[carbon]
|
||||
pattern = ^carbon\..*
|
||||
retentions = 1m:31d,10m:1y,1h:5y
|
||||
|
||||
[highres]
|
||||
pattern = ^highres.*
|
||||
retentions = 1s:1d,1m:7d
|
||||
|
||||
[statsd]
|
||||
pattern = ^statsd.*
|
||||
retentions = 1m:7d,10m:1y
|
||||
|
||||
[default]
|
||||
pattern = .*
|
||||
retentions = 10s:1d,1m:7d,10m:1y
|
||||
|
||||
26
docker/blocks/graphite1/files/supervisord.conf
Normal file
@@ -0,0 +1,26 @@
|
||||
[supervisord]
|
||||
nodaemon = true
|
||||
environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf'
|
||||
|
||||
[program:nginx]
|
||||
command = /usr/sbin/nginx
|
||||
stdout_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
autorestart = true
|
||||
|
||||
[program:carbon-cache]
|
||||
;user = www-data
|
||||
command = /opt/graphite/bin/carbon-cache.py --debug start
|
||||
stdout_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
autorestart = true
|
||||
|
||||
[program:graphite-webapp]
|
||||
;user = www-data
|
||||
directory = /opt/graphite/webapp
|
||||
environment = PYTHONPATH='/opt/graphite/webapp'
|
||||
command = /usr/bin/gunicorn_django -b127.0.0.1:8000 -w2 graphite/settings.py
|
||||
stdout_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
autorestart = true
|
||||
|
||||
@@ -6,3 +6,4 @@ postgrestest:
|
||||
POSTGRES_DATABASE: grafana
|
||||
ports:
|
||||
- "5432:5432"
|
||||
command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
# Building The Docs
|
||||
|
||||
To build the docs locally, you need to have docker installed. The
|
||||
docs are built using a custom [docker](https://www.docker.com/) image
|
||||
and the [mkdocs](http://www.mkdocs.org/) tool.
|
||||
docs are built using [Hugo](http://gohugo.io/) - a static site generator.
|
||||
|
||||
**Prepare the Docker Image**:
|
||||
|
||||
@@ -11,19 +10,40 @@ when running ``make docs-build`` depending on how your system's docker
|
||||
service is configured):
|
||||
|
||||
```
|
||||
$ git clone https://github.com/grafana/grafana.org
|
||||
$ cd grafana.org
|
||||
$ make docs-build
|
||||
git clone https://github.com/grafana/grafana.org
|
||||
cd grafana.org
|
||||
make docs-build
|
||||
```
|
||||
|
||||
**Build the Documentation**:
|
||||
|
||||
Now that the docker image has been prepared we can build the
|
||||
docs. Switch your working directory back to the directory this file
|
||||
(README.md) is in and run (possibly with ``sudo``):
|
||||
grafana docs and start a docs server.
|
||||
|
||||
If you have not cloned the Grafana repository already then:
|
||||
|
||||
```
|
||||
$ make docs
|
||||
cd ..
|
||||
git clone https://github.com/grafana/grafana
|
||||
```
|
||||
|
||||
Switch your working directory to the directory this file
|
||||
(README.md) is in.
|
||||
|
||||
```
|
||||
cd grafana/docs
|
||||
```
|
||||
|
||||
An AWS config file is required to build the docs Docker image and to publish the site to AWS. If you are building locally only and do not have any AWS credentials for docs.grafana.org then create an empty file named `awsconfig` in the current directory.
|
||||
|
||||
```
|
||||
touch awsconfig
|
||||
```
|
||||
|
||||
Then run (possibly with ``sudo``):
|
||||
|
||||
```
|
||||
make watch
|
||||
```
|
||||
|
||||
This command will not return control of the shell to the user. Instead
|
||||
@@ -32,4 +52,21 @@ we created in the previous step.
|
||||
|
||||
Open [localhost:3004](http://localhost:3004) to view the docs.
|
||||
|
||||
### Images & Content
|
||||
|
||||
All markdown files are located in this repo (main grafana repo). But all images are added to the https://github.com/grafana/grafana.org repo. So the process of adding images is a bit complicated.
|
||||
|
||||
First you need to create a feature (PR) branch of https://github.com/grafana/grafana.org so you can make changes. Then add the image to the `/static/img/docs` directory and make a commit that adds the image.
|
||||
|
||||
Then run:
|
||||
```
|
||||
make docs-build
|
||||
```
|
||||
|
||||
This will rebuild the docs docker container.
|
||||
|
||||
To be able to use the image you have to quit (CTRL-C) the `make watch` command (that you run in the same directory as this README). Then simply rerun `make watch`; it will restart the docs server, now with access to your image.
|
||||
|
||||
### Editing content
|
||||
|
||||
Changes to the markdown files should automatically cause a docs rebuild and live reload should reload the page in your browser.
|
||||
|
||||
@@ -1 +1 @@
|
||||
v4.2
|
||||
v4.3
|
||||
|
||||
15
docs/sources/administration/metrics.md
Normal file
@@ -0,0 +1,15 @@
|
||||
+++
|
||||
title = "Internal metrics"
|
||||
description = "Internal metrics exposed by Grafana"
|
||||
keywords = ["grafana", "metrics", "internal metrics"]
|
||||
type = "docs"
|
||||
[menu.docs]
|
||||
parent = "admin"
|
||||
weight = 8
|
||||
+++
|
||||
|
||||
# Internal metrics
|
||||
|
||||
Grafana collects some metrics about itself internally. Currently Grafana supports pushing metrics to Graphite and exposing them to be scraped by Prometheus.
|
||||
|
||||
To enable internal metrics you have to enable them under the [metrics] section in your [grafana.ini](http://docs.grafana.org/installation/configuration/#enabled-6) config file. If you want to push metrics to Graphite you also have to configure the [metrics.graphite](http://docs.grafana.org/installation/configuration/#metrics-graphite) section.
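A minimal sketch of the relevant `grafana.ini` sections (values are illustrative; check the configuration reference for the exact option names in your version):

```ini
[metrics]
# enable internal metric collection
enabled = true
interval_seconds = 10

[metrics.graphite]
# only needed if you want to push internal metrics to Graphite
address = localhost:2003
prefix = prod.grafana.%(instance_name)s.
```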
|
||||
@@ -22,7 +22,7 @@ to add and configure a `notification` channel (can be email, Pagerduty or other
|
||||
|
||||
{{< imgbox max-width="40%" img="/img/docs/v43/alert_notifications_menu.png" caption="Alerting Notification Channels" >}}
|
||||
|
||||
On the Notification Channels page hit the `New Channel` button to go the the page where you
|
||||
On the Notification Channels page hit the `New Channel` button to go to the page where you
|
||||
can configure and setup a new Notification Channel.
|
||||
|
||||
You specify name and type, and type specific options. You can also test the notification to make
|
||||
@@ -92,6 +92,26 @@ Example json body:
|
||||
|
||||
- **state** - The possible values for alert state are: `ok`, `paused`, `alerting`, `pending`, `no_data`.
|
||||
|
||||
### DingDing/DingTalk
|
||||
|
||||
[Instructions in Chinese](https://open-doc.dingtalk.com/docs/doc.htm?spm=a219a.7629140.0.0.p2lr6t&treeId=257&articleId=105733&docType=1).
|
||||
|
||||
In DingTalk PC Client:
|
||||
|
||||
1. Click the "more" icon at the bottom left of the panel.
|
||||
|
||||
2. Click the "Robot Manage" item in the pop-up menu; a new panel called "Robot Manage" will appear.
|
||||
|
||||
3. In the "Robot Manage" panel, select "customised: customised robot with Webhook".
|
||||
|
||||
4. In the next panel, named "robot detail", click the "Add" button.
|
||||
|
||||
5. In the "Add Robot" panel, enter a nickname for the robot and select a "message group" for the robot to join. Click "next".
|
||||
|
||||
6. There will be a Webhook URL in the panel that looks like this: https://oapi.dingtalk.com/robot/send?access_token=xxxxxxxxx. Copy this URL to the Grafana DingTalk settings page and then click "finish".
|
||||
|
||||
DingTalk supports the following message types: `text`, `link` and `markdown`. Currently only the `text` message type is supported.
|
||||
|
||||
### Other Supported Notification Channels
|
||||
|
||||
Grafana also supports the following Notification Channels:
|
||||
@@ -114,7 +134,7 @@ Grafana also supports the following Notification Channels:
|
||||
|
||||
# Enable images in notifications {#external-image-store}
|
||||
|
||||
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessable (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
|
||||
Grafana can render the panel associated with the alert rule and include that in the notification. Most Notification Channels require that this image be publicly accessible (Slack and PagerDuty for example). In order to include images in alert notifications, Grafana can upload the image to an image store. It currently supports
|
||||
Amazon S3 and Webdav for this. So to set that up you need to configure the [external image uploader](/installation/configuration/#external-image-storage) in your grafana-server ini config file.
|
||||
|
||||
Currently only the Email Channel attaches images if no external image store is specified. To include images in alert notifications for other channels you need to set up an external image store.
|
||||
|
||||
@@ -27,14 +27,12 @@ and the conditions that need to be met for the alert to change state and trigger
|
||||
## Execution
|
||||
|
||||
The alert rules are evaluated in the Grafana backend in a scheduler and query execution engine that is part
|
||||
of core Grafana. Only some data soures are supported right now. They include `Graphite`, `Prometheus`,
|
||||
of core Grafana. Only some data sources are supported right now. They include `Graphite`, `Prometheus`,
|
||||
`InfluxDB` and `OpenTSDB`.
|
||||
|
||||
### Clustering
|
||||
|
||||
We have not implemented clustering yet. So if you run multiple instances of grafana-server
|
||||
you have to make sure [execute_alerts]({{< relref "installation/configuration.md#alerting" >}})
|
||||
is true on only one instance or otherwise you will get duplicated notifications.
|
||||
Currently alerting supports a limited form of high availability. Since v4.2.0 of Grafana, alert notifications are deduped when running multiple servers. This means all alerts are executed on every server but no duplicate alert notifications are sent due to the deduping logic. Proper load balancing of alerts will be introduced in the future.
|
||||
|
||||
<div class="clearfix"></div>
|
||||
|
||||
@@ -52,12 +50,22 @@ Here you can specify the name of the alert rule and how often the scheduler shou
|
||||
### Conditions
|
||||
|
||||
Currently the only condition type that exists is a `Query` condition that allows you to
|
||||
specify a query letter, time range and an aggregation function. The letter refers to
|
||||
a query you already have added in the **Metrics** tab. The result from the query and the aggregation function is
|
||||
a single value that is then used in the threshold check. The query used in an alert rule cannot
|
||||
contain any template variables. Currently we only support `AND` and `OR` operators between conditions and they are executed serially.
|
||||
specify a query letter, time range and an aggregation function.
|
||||
|
||||
|
||||
### Query condition example
|
||||
|
||||
```sql
|
||||
avg() OF query(A, 5m, now) IS BELOW 14
|
||||
```
|
||||
|
||||
- `avg()` Controls how the values for **each** series should be reduced to a value that can be compared against the threshold. Click on the function to change it to another aggregation function.
|
||||
- `query(A, 5m, now)` The letter defines what query to execute from the **Metrics** tab. The second two parameters define the time range, `5m, now` means 5 minutes from now to now. You can also do `10m, now-2m` to define a time range that will be 10 minutes from now to 2 minutes from now. This is useful if you want to ignore the last 2 minutes of data.
|
||||
- `IS BELOW 14` Defines the type of threshold and the threshold value. You can click on `IS BELOW` to change the type of threshold.
|
||||
|
||||
The query used in an alert rule cannot contain any template variables. Currently we only support `AND` and `OR` operators between conditions and they are executed serially.
|
||||
For example, we have 3 conditions in the following order:
|
||||
`condition:A(evaluates to: TRUE) OR condition:B(evaluates to: FALSE) AND condition:C(evaluates to: TRUE)`
|
||||
*condition:A(evaluates to: TRUE) OR condition:B(evaluates to: FALSE) AND condition:C(evaluates to: TRUE)*
|
||||
so the result will be calculated as ((TRUE OR FALSE) AND TRUE) = TRUE.
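Spelling out the serial evaluation of that example step by step:

```
(A OR B) AND C
= (TRUE OR FALSE) AND TRUE
= TRUE AND TRUE
= TRUE
```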
|
||||
|
||||
We plan to add other condition types in the future, like `Other Alert`, where you can include the state
|
||||
@@ -66,7 +74,7 @@ of another alert in your conditions, and `Time Of Day`.
|
||||
#### Multiple Series
|
||||
|
||||
If a query returns multiple series then the aggregation function and threshold check will be evaluated for each series.
|
||||
What Grafana does not do currently is track alert rule state **per series**. This has implications that is exemplified
|
||||
What Grafana does not do currently is track alert rule state **per series**. This has implications that are detailed
|
||||
in the scenario below.
|
||||
|
||||
- Alert condition with query that returns 2 series: **server1** and **server2**
|
||||
@@ -81,8 +89,7 @@ we plan to track state **per series** in a future release.
|
||||
|
||||
### No Data / Null values
|
||||
|
||||
Below you condition you can configure how the rule evaluation engine should handle queries that return no data or only null valued
|
||||
data.
|
||||
Below your conditions you can configure how the rule evaluation engine should handle queries that return no data or only null values.
|
||||
|
||||
No Data Option | Description
|
||||
------------ | -------------
|
||||
@@ -92,23 +99,23 @@ Keep Last State | Keep the current alert rule state, what ever it is.
|
||||
|
||||
### Execution errors or timeouts
|
||||
|
||||
The last option is how to handle execution or timeout errors.
|
||||
The last option tells how to handle execution or timeout errors.
|
||||
|
||||
Error or timeout option | Description
|
||||
------------ | -------------
|
||||
Alerting | Set alert rule state to `Alerting`
|
||||
Keep Last State | Keep the current alert rule state, whatever it is.
|
||||
|
||||
If you an unreliable time series store that where queries sometime timeout or fail randomly you can set this option
|
||||
t `Keep Last State` to basically ignore them.
|
||||
If you have an unreliable time series store from which queries sometimes time out or fail randomly you can set this option
|
||||
to `Keep Last State` in order to basically ignore them.
|
||||
|
||||
## Notifications
|
||||
|
||||
In the Alert tab you can also specify alert rule notifications along with a detailed message about the alert rule.
|
||||
The message can contain anything, information about how you might solve the issue, link to runbook etc.
|
||||
The message can contain anything, information about how you might solve the issue, link to runbook, etc.
|
||||
|
||||
The actual notifications are configured and shared between multiple alerts. Read the
|
||||
[Notifications]({{< relref "notifications.md" >}}) guide for how to configure and setup notifications.
|
||||
[notifications]({{< relref "notifications.md" >}}) guide for how to configure and setup notifications.
|
||||
|
||||
## Alert State History & Annotations
|
||||
|
||||
@@ -121,7 +128,7 @@ submenu in the alert tab to view & clear state history.
|
||||
{{< imgbox max-width="40%" img="/img/docs/v4/alert_test_rule.png" caption="Test Rule" >}}
|
||||
|
||||
The first level of troubleshooting you can do is to hit the **Test Rule** button. You will get a result back that you can expand
|
||||
to the point where you can see the raw data that was returned form your query.
|
||||
to the point where you can see the raw data that was returned from your query.
|
||||
|
||||
Further troubleshooting can also be done by inspecting the grafana-server log. If it's not an error or for some reason
|
||||
the log does not say anything you can enable debug logging for some relevant components. This is done
|
||||
|
||||
@@ -13,6 +13,10 @@ Here you can find links to older versions of the documentation that might be bet
|
||||
of Grafana.
|
||||
|
||||
- [Latest](http://docs.grafana.org)
|
||||
- [Version 4.4](http://docs.grafana.org/v4.4)
|
||||
- [Version 4.3](http://docs.grafana.org/v4.3)
|
||||
- [Version 4.2](http://docs.grafana.org/v4.2)
|
||||
- [Version 4.1](http://docs.grafana.org/v4.1)
|
||||
- [Version 4.0](http://docs.grafana.org/v4.0)
|
||||
- [Version 3.1](http://docs.grafana.org/v3.1)
|
||||
- [Version 3.0](http://docs.grafana.org/v3.0)
|
||||
|
||||
@@ -84,8 +84,8 @@ Name | Description
|
||||
*metrics(namespace, [region])* | Returns a list of metrics in the namespace. (specify region for custom metrics)
|
||||
*dimension_keys(namespace)* | Returns a list of dimension keys in the namespace.
|
||||
*dimension_values(region, namespace, metric, dimension_key)* | Returns a list of dimension values matching the specified `region`, `namespace`, `metric` and `dimension_key`.
|
||||
*ebs_volume_ids(region, instance_id)* | Returns a list of volume id matching the specified `region`, `instance_id`.
|
||||
*ec2_instance_attribute(region, attribute_name, filters)* | Returns a list of attribute matching the specified `region`, `attribute_name`, `filters`.
|
||||
*ebs_volume_ids(region, instance_id)* | Returns a list of volume ids matching the specified `region`, `instance_id`.
|
||||
*ec2_instance_attribute(region, attribute_name, filters)* | Returns a list of attributes matching the specified `region`, `attribute_name`, `filters`.
|
||||
|
||||
For details about the metrics CloudWatch provides, please refer to the [CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
|
||||
|
||||
@@ -101,10 +101,13 @@ Query | Service
|
||||
*dimension_values(us-east-1,AWS/RDS,CPUUtilization,DBInstanceIdentifier)* | RDS
|
||||
*dimension_values(us-east-1,AWS/S3,BucketSizeBytes,BucketName)* | S3
|
||||
|
||||
#### ec2_instance_attribute JSON filters
|
||||
## ec2_instance_attribute examples
|
||||
|
||||
The `ec2_instance_attribute` query take `filters` in JSON format.
|
||||
### JSON filters
|
||||
|
||||
The `ec2_instance_attribute` query takes `filters` in JSON format.
|
||||
You can specify [pre-defined filters of ec2:DescribeInstances](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html).
|
||||
Note that the actual filtering takes place on Amazon's servers, not in Grafana.
|
||||
|
||||
Filters syntax:
|
||||
|
||||
@@ -116,6 +119,45 @@ Example `ec2_instance_attribute()` query
|
||||
|
||||
ec2_instance_attribute(us-east-1, InstanceId, { "tag:Environment": [ "production" ] })
|
||||
|
||||
### Selecting Attributes
|
||||
|
||||
Only 1 attribute per instance can be returned. Any flat attribute can be selected (i.e. if the attribute has a single value and isn't an object or array). Below is a list of available flat attributes:
|
||||
|
||||
* `AmiLaunchIndex`
|
||||
* `Architecture`
|
||||
* `ClientToken`
|
||||
* `EbsOptimized`
|
||||
* `EnaSupport`
|
||||
* `Hypervisor`
|
||||
* `IamInstanceProfile`
|
||||
* `ImageId`
|
||||
* `InstanceId`
|
||||
* `InstanceLifecycle`
|
||||
* `InstanceType`
|
||||
* `KernelId`
|
||||
* `KeyName`
|
||||
* `LaunchTime`
|
||||
* `Platform`
|
||||
* `PrivateDnsName`
|
||||
* `PrivateIpAddress`
|
||||
* `PublicDnsName`
|
||||
* `PublicIpAddress`
|
||||
* `RamdiskId`
|
||||
* `RootDeviceName`
|
||||
* `RootDeviceType`
|
||||
* `SourceDestCheck`
|
||||
* `SpotInstanceRequestId`
|
||||
* `SriovNetSupport`
|
||||
* `SubnetId`
|
||||
* `VirtualizationType`
|
||||
* `VpcId`
|
||||
|
||||
Tags can be selected by prepending the tag name with `Tags.`
|
||||
|
||||
Example `ec2_instance_attribute()` query
|
||||
|
||||
ec2_instance_attribute(us-east-1, Tags.Name, { "tag:Team": [ "sysops" ] })
|
||||
|
||||
## Cost
|
||||
|
||||
Amazon provides 1 million CloudWatch API requests each month at no additional charge. Past this,
|
||||
|
||||
@@ -92,9 +92,10 @@ The Elasticsearch data source supports two types of queries you can use in the *
|
||||
Query | Description
|
||||
------------ | -------------
|
||||
*{"find": "fields", "type": "keyword"} | Returns a list of field names with the index type `keyword`.
|
||||
*{"find": "terms", "field": "@hostname"}* | Returns a list of values for a field using term aggregation. Query will user current dashboard time range as time range for query.
|
||||
*{"find": "terms", "field": "@hostname", "size": 1000}* | Returns a list of values for a field using term aggregation. Query will user current dashboard time range as time range for query.
|
||||
*{"find": "terms", "field": "@hostname", "query": '<lucene query>'}* | Returns a list of values for a field using term aggregation & and a specified lucene query filter. Query will use current dashboard time range as time range for query.
|
||||
|
||||
There is a default size limit of 500 on terms queries. Set the size property in your query to set a custom limit.
|
||||
You can use other variables inside the query. Example query definition for a variable named `$host`.
|
||||
|
||||
```
|
||||
|
||||
@@ -41,7 +41,9 @@ Proxy access means that the Grafana backend will proxy all requests from the bro
|
||||
Click the ``Select metric`` link to start navigating the metric space. Once you start you can continue using the mouse
|
||||
or keyboard arrow keys. You can select a wildcard and still continue.
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_query1_still.png"
|
||||
animated-gif="/img/docs/v45/graphite_query1.gif" >}}
|
||||
|
||||
|
||||
### Functions
|
||||
|
||||
@@ -50,18 +52,26 @@ a function is selected it will be added and your focus will be in the text box o
|
||||
a parameter just click on it and it will turn into a text box. To delete a function click the function name followed
|
||||
by the x icon.
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_query2_still.png"
|
||||
animated-gif="/img/docs/v45/graphite_query2.gif" >}}
|
||||
|
||||
|
||||
### Optional parameters
|
||||
|
||||
Some functions like aliasByNode support an optional second argument. To add this parameter specify for example 3,-2 as the first parameter and the function editor will adapt and move the -2 to a second parameter. To remove the second optional parameter just click on it and leave it blank and the editor will remove it.
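For example (the metric path here is illustrative), adding `-2` after the first parameter turns the single-argument call into one with the optional second argument:

```
aliasByNode(apps.server1.counters.requests.count, 3)
aliasByNode(apps.server1.counters.requests.count, 3, -2)
```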
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_query3_still.png"
|
||||
animated-gif="/img/docs/v45/graphite_query3.gif" >}}
|
||||
|
||||
|
||||
### Nested Queries
|
||||
|
||||
You can reference queries by the row “letter” that they’re on (similar to Microsoft Excel). If you add a second query to a graph, you can reference the first query simply by typing in #A. This provides an easy and convenient way to build compounded queries.
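A minimal sketch of a second query that references query A by its letter (the metric paths and the choice of `asPercent` are just an illustration):

```
# Query A
sumSeries(apps.server*.requests.count)

# Query B builds on A
asPercent(apps.server1.requests.count, #A)
```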
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/graphite_nested_queries_still.png"
|
||||
animated-gif="/img/docs/v45/graphite_nested_queries.gif" >}}
|
||||
|
||||
|
||||
## Point consolidation
|
||||
|
||||
All Graphite metrics are consolidated so that Graphite doesn't return more data points than there are pixels in the graph. By default,
|
||||
|
||||
@@ -36,12 +36,12 @@ Name | Description
|
||||
### Proxy vs Direct access
|
||||
|
||||
Proxy access means that the Grafana backend will proxy all requests from the browser. So requests to InfluxDB will be channeled through
|
||||
`grafana-server`. This means that the URL you specify needs to be accessable from the server you are running Grafana on. Proxy access
|
||||
`grafana-server`. This means that the URL you specify needs to be accessible from the server you are running Grafana on. Proxy access
|
||||
mode is also more secure as the username & password will never reach the browser.
|
||||
|
||||
## Query Editor
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/influxdb_query_still.png" class="docs-image--no-shadow" animated-gif="/img/docs/v45/influxdb_query.gif" >}}
|
||||
|
||||
You find the InfluxDB editor in the metrics tab in Graph or Singlestat panel's edit mode. You enter edit mode by clicking the
|
||||
panel title, then edit. The editor allows you to select metrics and tags.
|
||||
@@ -57,10 +57,8 @@ will automatically adjust the filter tag condition to use the InfluxDB regex mat
|
||||
|
||||
### Field & Aggregation functions
|
||||
In the `SELECT` row you can specify what fields and functions you want to use. If you have a
|
||||
group by time you need an aggregation function. Some functions like derivative require an aggregation function.
|
||||
|
||||
The editor tries simplify and unify this part of the query. For example:
|
||||

|
||||
group by time you need an aggregation function. Some functions like derivative require an aggregation function. The editor tries to simplify and unify this part of the query. For example:<br>
|
||||
<br>
|
||||
|
||||
The above will generate the following InfluxDB `SELECT` clause:
|
||||
|
||||
@@ -88,8 +86,8 @@ You can switch to raw query mode by clicking hamburger icon and then `Switch edi
|
||||
- $m = replaced with measurement name
|
||||
- $measurement = replaced with measurement name
|
||||
- $col = replaced with column name
|
||||
- $tag_hostname = replaced with the value of the hostname tag
|
||||
- You can also use [[tag_hostname]] pattern replacement syntax
|
||||
- $tag_exampletag = replaced with the value of the `exampletag` tag. The syntax is `$tag_yourTagName` (must start with `$tag_`). To use your tag as an alias in the ALIAS BY field, the tag must be used in a GROUP BY in the query (see the sketch below).
|
||||
- You can also use [[tag_hostname]] pattern replacement syntax. For example, in the ALIAS BY field using this text `Host: [[tag_hostname]]` would substitute in the `hostname` tag value for each legend value and an example legend value would be: `Host: server1`.
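Putting the two together, a minimal sketch (measurement and tag names are illustrative) of a query grouped by the `hostname` tag with a matching alias:

```
SELECT mean("value") FROM "cpu" WHERE $timeFilter GROUP BY time($__interval), "hostname"
ALIAS BY: Host: $tag_hostname
```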
|
||||
|
||||
### Table query / raw data
|
||||
|
||||
@@ -132,7 +130,7 @@ You can fetch key names for a given measurement.
|
||||
SHOW TAG KEYS [FROM <measurement_name>]
|
||||
```
|
||||
|
||||
If you have a variable with key names you can use this variable in a group by clause. This will allow you to change group by using the variable dropdown a the top
|
||||
If you have a variable with key names you can use this variable in a group by clause. This will allow you to change group by using the variable dropdown at the top
|
||||
of the dashboard.
|
||||
|
||||
### Using variables in queries
|
||||
|
||||
@@ -11,8 +11,7 @@ weight = 7
|
||||
|
||||
# Using MySQL in Grafana
|
||||
|
||||
> Only available in Grafana v4.3+. This data source is not ready for
|
||||
> production use, currently in development (alpha state).
|
||||
> Only available in Grafana v4.3+.
|
||||
|
||||
Grafana ships with a built-in MySQL data source plugin that allows you to query and visualize
|
||||
data from a MySQL compatible database.
|
||||
@@ -29,8 +28,7 @@ data from a MySQL compatible database.
|
||||
The database user you specify when you add the data source should only be granted SELECT permissions on
|
||||
the specified database & tables you want to query. Grafana does not validate that the query is safe. The query
|
||||
could include any SQL statement. For example, statements like `USE otherdb;` and `DROP TABLE user;` would be
|
||||
executed. To protect against this we **Highly** recommmend you create a specific mysql user with
|
||||
restricted permissions.
|
||||
executed. To protect against this we **highly** recommend you create a specific MySQL user with restricted permissions.
|
||||
|
||||
Example:
|
||||
|
||||
@@ -49,11 +47,9 @@ Macro example | Description
|
||||
------------ | -------------
|
||||
*$__timeFilter(dateColumn)* | Will be replaced by a time range filter using the specified column name. For example, *dateColumn > FROM_UNIXTIME(1494410783) AND dateColumn < FROM_UNIXTIME(1494497183)*
|
||||
|
||||
We plan to add many more macros. If you have suggestions for what macros you would like to see, please
|
||||
[open an issue](https://github.com/grafana/grafana) in our GitHub repo.
|
||||
We plan to add many more macros. If you have suggestions for what macros you would like to see, please [open an issue](https://github.com/grafana/grafana) in our GitHub repo.
|
||||
|
||||
The query editor has a link named `Generated SQL` that show up after a query as been executed, while in panel edit mode. Click
|
||||
on it and it will expand and show the raw interpolated SQL string that was executed.
|
||||
The query editor has a link named `Generated SQL` that shows up after a query has been executed, while in panel edit mode. Click on it and it will expand and show the raw interpolated SQL string that was executed.
|
||||
|
||||
## Table queries
|
||||
|
||||
@@ -61,8 +57,7 @@ If the `Format as` query option is set to `Table` then you can basically do any
|
||||
|
||||
Query editor with example query:
|
||||
|
||||

|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/mysql_table_query.png" >}}
|
||||
|
||||
The query:
|
||||
|
||||
@@ -109,8 +104,71 @@ This is something we plan to add.
|
||||
|
||||
## Templating
|
||||
|
||||
You can use variables in your queries but there are currently no support for defining `Query` variables
|
||||
that target a MySQL data source.
|
||||
This feature is currently available in the nightly builds and will be included in the 5.0.0 release.
|
||||
|
||||
Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place. Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data being displayed in your dashboard.
|
||||
|
||||
Check out the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different types of template variables.
|
||||
|
||||
### Query Variable
|
||||
|
||||
If you add a template variable of the type `Query`, you can write a MySQL query that can
|
||||
return things like measurement names, key names or key values that are shown as a dropdown select box.
|
||||
|
||||
For example, you can have a variable that contains all values for the `hostname` column in a table if you specify a query like this in the templating variable *Query* setting.
|
||||
|
||||
```sql
|
||||
SELECT hostname FROM my_host
|
||||
```
|
||||
|
||||
A query can return multiple columns and Grafana will automatically create a list from them. For example, the query below will return a list with values from `hostname` and `hostname2`.
|
||||
|
||||
```sql
|
||||
SELECT my_host.hostname, my_other_host.hostname2 FROM my_host JOIN my_other_host ON my_host.city = my_other_host.city
|
||||
```
|
||||
|
||||
Another option is a query that can create a key/value variable. The query should return two columns that are named `__text` and `__value`. The `__text` column value should be unique (if it is not unique then the first value is used). The options in the dropdown will have a text and value that allows you to have a friendly name as text and an id as the value. An example query with `hostname` as the text and `id` as the value:
|
||||
|
||||
```sql
|
||||
SELECT hostname AS __text, id AS __value FROM my_host
|
||||
```
|
||||
|
||||
You can also create nested variables. For example, if you had another variable named `region`, you could have
|
||||
the hosts variable only show hosts from the currently selected region with a query like this (if `region` is a multi-value variable then use the `IN` comparison operator rather than `=` to match against multiple values):
|
||||
|
||||
```sql
|
||||
SELECT hostname FROM my_host WHERE region IN($region)
|
||||
```
|
||||
|
||||
### Using Variables in Queries
|
||||
|
||||
Template variables are quoted automatically, so if the variable holds a string value do not wrap it in quotes in WHERE clauses. If the variable is a multi-value variable then use the `IN` comparison operator rather than `=` to match against multiple values.
|
||||
|
||||
There are two syntaxes:
|
||||
|
||||
`$<varname>` Example with a template variable named `hostname`:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
UNIX_TIMESTAMP(atimestamp) as time_sec,
|
||||
aint as value,
|
||||
avarchar as metric
|
||||
FROM my_table
|
||||
WHERE $__timeFilter(atimestamp) and hostname in($hostname)
|
||||
ORDER BY atimestamp ASC
|
||||
```
|
||||
|
||||
`[[varname]]` Example with a template variable named `hostname`:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
UNIX_TIMESTAMP(atimestamp) as time_sec,
|
||||
aint as value,
|
||||
avarchar as metric
|
||||
FROM my_table
|
||||
WHERE $__timeFilter(atimestamp) and hostname in([[hostname]])
|
||||
ORDER BY atimestamp ASC
|
||||
```
|
||||
|
||||
## Alerting
|
||||
|
||||
|
||||
@@ -39,14 +39,16 @@ Name | Description
|
||||
|
||||
Open a graph in edit mode by clicking the title > Edit (or by pressing the `e` key while hovering over the panel).
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/prometheus_query_editor_still.png"
|
||||
animated-gif="/img/docs/v45/prometheus_query_editor.gif" >}}
|
||||
|
||||
Name | Description
|
||||
------- | --------
|
||||
*Query expression* | Prometheus query expression, check out the [Prometheus documentation](http://prometheus.io/docs/querying/basics/).
|
||||
*Legend format* | Controls the name of the time series, using name or pattern. For example `{{hostname}}` will be replaced with the label value for the label `hostname`.
|
||||
*Min step* | Set a lower limit for the Prometheus step option. Step controls how big the jumps are when the Prometheus query engine performs range queries. Sadly there is no official prometheus documentation to link to for this very important option.
|
||||
*Resolution* | Controls the step option. Small steps create high-resolution graphs but can be slow over larger time ranges, lowering the resolution can speed things up. `1/2` will try to set step option to generate 1 data point for every other pixel. A value of `1/10` will try to set step option so there is a data point every 10 pixels.*Metric lookup* | Search for metric names in this input field.
|
||||
*Resolution* | Controls the step option. Small steps create high-resolution graphs but can be slow over larger time ranges, lowering the resolution can speed things up. `1/2` will try to set step option to generate 1 data point for every other pixel. A value of `1/10` will try to set step option so there is a data point every 10 pixels.
|
||||
*Metric lookup* | Search for metric names in this input field.
|
||||
*Format as* | **(New in v4.3)** Switch between Table & Time series. Table format will only work in the Table panel.
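Putting *Query expression* and *Legend format* together, a minimal sketch (metric and label names are illustrative):

```
Query expression: sum(rate(http_requests_total[5m])) by (hostname)
Legend format:    {{hostname}}
```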
|
||||
|
||||
## Templating
|
||||
@@ -77,7 +79,7 @@ For details of *metric names*, *label names* and *label values* are please refer
|
||||
There are two syntaxes:
|
||||
|
||||
- `$<varname>` Example: rate(http_requests_total{job=~"$job"}[5m])
|
||||
- `[[varname]]` Example: rate(http_requests_total{job="my[[job]]"}[5m])
|
||||
- `[[varname]]` Example: rate(http_requests_total{job=~"[[job]]"}[5m])
|
||||
|
||||
Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. When the *Multi-value* or *Include all value*
|
||||
options are enabled, Grafana converts the labels from plain text to a regex compatible string, which means you have to use `=~` instead of `=`.
|
||||
|
||||
@@ -13,7 +13,7 @@ weight = 20
|
||||
|
||||
The purpose of this data source is to make it easier to create fake data for any panel.
|
||||
Using `Grafana TestData` you can build your own time series and have any panel render it.
|
||||
This make is much easier to verify functionally since the data can be shared very
|
||||
This makes it much easier to verify functionality since the data can be shared very easily.
|
||||
|
||||
## Enable
|
||||
|
||||
|
||||
@@ -50,15 +50,11 @@ populate the template variable to a desired value from the link.
|
||||
The metrics tab defines what series data and sources to render. Each datasource provides different
|
||||
options.
|
||||
|
||||
## Axes & Grid
|
||||
## Axes
|
||||
|
||||

|
||||
|
||||
The Axes & Grid tab controls the display of axes, grids and legend.
|
||||
|
||||
### Axes
|
||||
|
||||
The ``Left Y`` and ``Right Y`` can be customized using:
|
||||
The Axes tab controls the display of axes, grids and legend. The ``Left Y`` and ``Right Y`` can be customized using:
|
||||
|
||||
- ``Unit`` - The display unit for the Y value
|
||||
- ``Grid Max`` - The maximum Y value. (default auto)
|
||||
@@ -67,6 +63,20 @@ The ``Left Y`` and ``Right Y`` can be customized using:
|
||||
|
||||
Axes can also be hidden by unchecking the appropriate box from `Show Axis`.
|
||||
|
||||
### X-Axis Mode
|
||||
|
||||
There are three options:
|
||||
|
||||
- The default option is `Time` and means the x-axis represents time and that the data is grouped by time (for example, by hour or by minute).
|
||||
|
||||
- The `Series` option means that the data is grouped by series and not by time. The y-axis still represents the value.
|
||||
|
||||
<img src="/img/docs/v4/x_axis_mode_series.png" class="no-shadow">
|
||||
|
||||
- The `Histogram` option converts the graph into a histogram. A Histogram is a kind of bar chart that groups numbers into ranges, often called buckets or bins. Taller bars show that more data falls in that range. Histograms and buckets are described in more detail [here](http://docs.grafana.org/features/panels/heatmap/#histograms-and-buckets).
|
||||
|
||||
<img src="/img/docs/v43/heatmap_histogram.png" class="no-shadow">
|
||||
|
||||
### Legend
|
||||
|
||||
The legend can be hidden by unchecking the ``Show`` checkbox. If it's shown, it can be
|
||||
|
||||
@@ -34,7 +34,7 @@ The singlestat panel has a normal query editor to allow you define your exact me
|
||||
* `delta` - The total incremental increase (of a counter) in the series. An attempt is made to account for counter resets, but this will only be accurate for single instance metrics. Used to show total counter increase in time series.
|
||||
* `diff` - The difference between 'current' (last value) and 'first'.
|
||||
* `range` - The difference between 'min' and 'max'. Useful to show the range of change for a gauge.
|
||||
4. `Postfixes`: The Postfix fields let you define a custom label and font-size (as a %) to appear *after* the value
|
||||
4. `Prefix/Postfix`: The Prefix/Postfix fields let you define a custom label and font-size (as a %) to appear *before/after* the value. The `$__name` variable can be used here to use the series name or alias from the metric query.
|
||||
5. `Units`: Units are appended to the Singlestat within the panel, and will respect the color and threshold settings for the value.
|
||||
6. `Decimals`: The Decimal field allows you to override the automatic decimal precision, and set it explicitly.
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ weight = 2
|
||||
|
||||
# Table Panel
|
||||
|
||||
<img src="/assets/img/features/table-panel.png">
|
||||
<img class="screenshot" src="/assets/img/features/table-panel.png">
|
||||
|
||||
The new table panel is very flexible, supporting both multiple modes for time series as well as for
|
||||
table, annotation and raw JSON data. It also provides date formatting and value formatting and coloring options.
|
||||
@@ -22,55 +22,63 @@ To view table panels in action and test different configurations with sample dat
|
||||
## Options overview
|
||||
|
||||
The table panel has many ways to manipulate your data for optimal presentation.
|
||||
{{< docs-imagebox img="/img/docs/v45/table_options.png" class="docs-image--no-shadow" max-width= "500px" >}}
|
||||
|
||||
<img class="no-shadow" src="/img/docs/v2/table-config2.png">
|
||||
|
||||
1. `Data`: Control how your query is transformed into a table.
|
||||
2. `Table Display`: Table display options.
|
||||
3. `Column Styles`: Column value formatting and display options.
|
||||
2. `Paging`: Table display options.
|
||||
|
||||
|
||||
## Data to Table
|
||||
|
||||
<img class="no-shadow" src="/img/docs/v2/table-data-options.png">
|
||||
{{< docs-imagebox img="/img/docs/v45/table_data_options.png" max-width="500px" class="docs-image--right">}}
|
||||
|
||||
The data section contains the **To Table Transform (1)**. This is the primary option for how your data/metric
|
||||
query should be transformed into a table format. The **Columns (2)** option allows you to select what columns
|
||||
you want in the table. Only applicable for some transforms.
|
||||
|
||||
<div class="clearfix"></div>
|
||||
|
||||
### Time series to rows
|
||||
|
||||
<img src="/img/docs/v2/table_ts_to_rows2.png">
|
||||
{{< docs-imagebox img="/img/docs/v45/table_ts_to_rows.png" >}}
|
||||
|
||||
In the simplest mode you can turn time series into rows. This means you get a `Time`, `Metric` and a `Value` column, where `Metric` is the name of the time series.
|
||||
|
||||
### Time series to columns
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/table_ts_to_columns.png" >}}
|
||||
|
||||
|
||||
This transform allows you to take multiple time series and group them by time, which will result in the primary column being `Time` and a column for each time series.
|
||||
|
||||
### Time series aggregations
|
||||
|
||||

|
||||
{{< docs-imagebox img="/img/docs/v45/table_ts_to_aggregations.png" >}}
|
||||
|
||||
This table transformation will lay out your table into rows by metric, allowing columns of `Avg`, `Min`, `Max`, `Total`, `Current` and `Count`. More than one column can be added.
|
||||
|
||||
### Annotations
|
||||

|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/table_annotations.png" >}}
|
||||
|
||||
|
||||
If you have annotations enabled in the dashboard you can have the table show them. If you configure this
|
||||
mode then any queries you have in the metrics tab will be ignored.
|
||||
|
||||
### JSON Data
|
||||

|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/table_json_data.png" max-width="500px" >}}
|
||||
|
||||
If you have an Elasticsearch **Raw Document** query or an Elasticsearch query without a `date histogram` use this
|
||||
transform mode and pick the columns using the **Columns** section.
|
||||
|
||||

|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/elastic_raw_doc.png" >}}
|
||||
|
||||
## Table Display
|
||||
|
||||
<img class="no-shadow" src="/img/docs/v2/table-display.png">
|
||||
{{< docs-imagebox img="/img/docs/v45/table_paging.png" class="docs-image--no-shadow docs-image--right" max-width="350px" >}}
|
||||
|
||||
1. `Pagination (Page Size)`: The `Pagination` (page size) setting is the threshold at which the table rows will be broken into pages. For example, if your table had 95 records with a pagination value of 10, your table would be split across 10 pages.
|
||||
2. `Scroll`: The `scroll bar` checkbox toggles the ability to scroll within the panel; when unchecked, the panel height will grow to display all rows.
|
||||
@@ -81,13 +89,11 @@ transform mode and pick the columns using the **Columns** section.
|
||||
|
||||
The column styles allow you to control how dates and numbers are formatted.
|
||||
|
||||
<img class="no-shadow" src="/img/docs/v2/Column-Options.png">
|
||||
{{< docs-imagebox img="/img/docs/v45/table_column_styles.png" class="docs-image--no-shadow" >}}
|
||||
|
||||
1. `Name or regex`: The Name or Regex field controls what columns the rule should be applied to. The regex or name filter will be matched against the column name, not against column values.
|
||||
2. `Type`: The three supported types of types are `Number`, `String` and `Date`.
|
||||
3. `Title`: Title for the column, when using a Regex the title can include replacement strings like `$1`.
|
||||
4. `Format`: Specify date format. Only available when `Type` is set to `Date`.
|
||||
5. `Coloring` and `Thresholds`: Specify color mode and thresholds limits.
|
||||
6. `Unit` and `Decimals`: Specify unit and decimal precision for numbers.
|
||||
7. `Add column style rule`: Add new column rule.
|
||||
2. `Column Header`: Title for the column, when using a Regex the title can include replacement strings like `$1`.
|
||||
3. `Add column style rule`: Add new column rule.
|
||||
4. `Thresholds` and `Coloring`: Specify color mode and thresholds limits.
|
||||
5. `Type`: The three supported types are `Number`, `String` and `Date`. `Unit` and `Decimals`: Specify unit and decimal precision for numbers. `Format`: Specify date format for dates.
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ weight = 7
|
||||
|
||||
# Keyboard shortcuts
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v4/shortcuts.png" max-width="20rem" >}}
|
||||
{{< docs-imagebox img="/img/docs/v4/shortcuts.png" max-width="20rem" class="docs-image--right" >}}
|
||||
|
||||
Grafana v4 introduces a number of really powerful keyboard shortcuts. You can now focus a panel
|
||||
by hovering over it with your mouse. With a panel focused you can simply hit `e` to toggle panel
|
||||
|
||||
@@ -24,9 +24,9 @@ Read the [Basic Concepts](/guides/basic_concepts) document to get a crash course
|
||||
|
||||
### Top header
|
||||
|
||||
Let's start with creating a new Dashboard. You can find the new Dashboard link at the bottom of the Dashboard picker. You now have a blank Dashboard.
|
||||
Let's start with creating a new Dashboard. You can find the new Dashboard link on the right side of the Dashboard picker. You now have a blank Dashboard.
|
||||
|
||||
<img class="no-shadow" src="/img/docs/v2/v2_top_nav_annotated.png">
|
||||
<img class="no-shadow" src="/img/docs/v45/top_nav_annotated.png">
|
||||
|
||||
The image above shows you the top header for a Dashboard.
|
||||
|
||||
@@ -41,19 +41,7 @@ The image above shows you the top header for a Dashboard.
|
||||
|
||||
Dashboards are at the core of what Grafana is all about. Dashboards are composed of individual Panels arranged on a number of Rows. Grafana ships with a variety of Panels. Grafana makes it easy to construct the right queries, and customize the display properties so that you can create the perfect Dashboard for your need. Each Panel can interact with data from any configured Grafana Data Source (currently InfluxDB, Graphite, OpenTSDB, Prometheus and Cloudwatch). The [Basic Concepts](/guides/basic_concepts) guide explores these key ideas in detail.
|
||||
|
||||
|
||||
## Adding & Editing Graphs and Panels
|
||||
|
||||

|
||||
|
||||
1. You add panels via row menu. The row menu is the green icon to the left of each row.
|
||||
2. To edit the graph you click on the graph title to open the panel menu, then `Edit`.
|
||||
3. This should take you to the `Metrics` tab. In this tab you should see the editor for your default data source.
|
||||
|
||||
When you click the `Metrics` tab, you are presented with a Query Editor that is specific to the Panel Data Source. Use the Query Editor to build your queries and Grafana will visualize them in real time.
|
||||
|
||||
|
||||
<img src="/img/docs/v2/dashboard_annotated.png" class="no-shadow">
|
||||
<img src="/img/docs/v45/dashboard_annotated.png" class="no-shadow">
|
||||
|
||||
1. Zoom out time range
|
||||
2. Time picker dropdown. Here you can access relative time range options, auto refresh options and set custom absolute time ranges.
|
||||
@@ -62,6 +50,17 @@ When you click the `Metrics` tab, you are presented with a Query Editor that is
|
||||
5. Dashboard panel. You edit panels by clicking the panel title.
|
||||
6. Graph legend. You can change series colors, y-axis and series visibility directly from the legend.
|
||||
|
||||
|
||||
## Adding & Editing Graphs and Panels
|
||||
|
||||

|
||||
|
||||
1. You add panels via row menu. The row menu is the icon to the left of each row.
|
||||
2. To edit the graph you click on the graph title to open the panel menu, then `Edit`.
|
||||
3. This should take you to the `Metrics` tab. In this tab you should see the editor for your default data source.
|
||||
|
||||
When you click the `Metrics` tab, you are presented with a Query Editor that is specific to the Panel Data Source. Use the Query Editor to build your queries and Grafana will visualize them in real time.
|
||||
|
||||
## Drag-and-Drop panels
|
||||
|
||||
You can Drag-and-Drop Panels within and between Rows. Click and hold the Panel title, and drag it to its new location. You can also easily resize panels by clicking the (-) and (+) icons.
|
||||
|
||||
50
docs/sources/guides/whats-new-in-v4-4.md
Normal file
@@ -0,0 +1,50 @@
|
||||
+++
|
||||
title = "What's New in Grafana v4.4"
|
||||
description = "Feature & improvement highlights for Grafana v4.4"
|
||||
keywords = ["grafana", "new", "documentation", "4.4.0"]
|
||||
type = "docs"
|
||||
[menu.docs]
|
||||
name = "Version 4.4"
|
||||
identifier = "v4.4"
|
||||
parent = "whatsnew"
|
||||
weight = -3
|
||||
+++
|
||||
|
||||
## What's New in Grafana v4.4
|
||||
|
||||
Grafana v4.4 is now [available for download](https://grafana.com/grafana/download/4.4.0).
|
||||
|
||||
**Highlights**:
|
||||
|
||||
- Dashboard History - version control for dashboards.
|
||||
|
||||
## New Features
|
||||
|
||||
**Dashboard History**: View dashboard version history, compare any two versions (summary & json diffs), restore to old version. This big feature
|
||||
was contributed by **Walmart Labs**. Big thanks to them for this massive contribution!
|
||||
Initial feature request: [#4638](https://github.com/grafana/grafana/issues/4638)
|
||||
Pull Request: [#8472](https://github.com/grafana/grafana/pull/8472)
|
||||
|
||||
## Enhancements
|
||||
* **Elasticsearch**: Added filter aggregation label [#8420](https://github.com/grafana/grafana/pull/8420), thx [@tianzk](github.com/tianzk)
|
||||
* **Sensu**: Added option for source and handler [#8405](https://github.com/grafana/grafana/pull/8405), thx [@joemiller](github.com/joemiller)
|
||||
* **CSV**: Configurable csv export datetime format [#8058](https://github.com/grafana/grafana/issues/8058), thx [@cederigo](github.com/cederigo)
|
||||
* **Table Panel**: Column style that preserves formatting/indentation (like pre tag) [#6617](https://github.com/grafana/grafana/issues/6617)
|
||||
* **DingDing**: Add DingDing Alert Notifier [#8473](https://github.com/grafana/grafana/pull/8473) thx [@jiamliang](https://github.com/jiamliang)
|
||||
|
||||
## Minor Enhancements
|
||||
|
||||
* **Elasticsearch**: Add option for result set size in raw_document [#3426](https://github.com/grafana/grafana/issues/3426) [#8527](https://github.com/grafana/grafana/pull/8527), thx [@mk-dhia](github.com/mk-dhia)
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
* **Graph**: Bug fix for negative values in histogram mode [#8628](https://github.com/grafana/grafana/issues/8628)
|
||||
|
||||
## Download
|
||||
|
||||
Head to the [v4.4 download page](https://grafana.com/grafana/download) for download links & instructions.
|
||||
|
||||
## Thanks
|
||||
|
||||
A big thanks to all the Grafana users who contribute by submitting PRs, bug reports, helping out on our [community site](https://community.grafana.com/) and providing feedback!
|
||||
|
||||
71
docs/sources/guides/whats-new-in-v4-5.md
Normal file
@@ -0,0 +1,71 @@
|
||||
+++
|
||||
title = "What's New in Grafana v4.5"
|
||||
description = "Feature & improvement highlights for Grafana v4.5"
|
||||
keywords = ["grafana", "new", "documentation", "4.5"]
|
||||
type = "docs"
|
||||
[menu.docs]
|
||||
name = "Version 4.5"
|
||||
identifier = "v4.5"
|
||||
parent = "whatsnew"
|
||||
weight = -4
|
||||
+++
|
||||
|
||||
# What's New in Grafana v4.5
|
||||
|
||||
## Highlights
|
||||
|
||||
### New prometheus query editor
|
||||
|
||||
The new query editor has full syntax highlighting, as well as auto complete for metrics, functions, and range vectors. There are also integrated function docs right in the query editor!
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/prometheus_query_editor_still.png" class="docs-image--block" animated-gif="/img/docs/v45/prometheus_query_editor.gif" >}}
|
||||
|
||||
### Elasticsearch: Add ad-hoc filters from the table panel
|
||||
|
||||
{{< docs-imagebox img="/img/docs/v45/elastic_ad_hoc_filters.png" class="docs-image--block" >}}
|
||||
|
||||
### Table cell links!
|
||||
Create column styles that turn cells into links that use the value in the cell (or other row values) to generate a URL to another dashboard or system:
|
||||

|
||||
|
||||
### Query Inspector
|
||||
Query Inspector is a new feature that shows query requests and responses. This can be helpful if a graph is not shown or shows something very different than what you expected.
|
||||
More information [here](https://community.grafana.com/t/using-grafanas-query-inspector-to-troubleshoot-issues/2630).
|
||||

|
||||
|
||||
## Changelog
|
||||
|
||||
### New Features
|
||||
|
||||
* **Table panel**: Render cell values as links that can have an url template that uses variables from current table row. [#3754](https://github.com/grafana/grafana/issues/3754)
|
||||
* **Elasticsearch**: Add ad hoc filters directly by clicking values in table panel [#8052](https://github.com/grafana/grafana/issues/8052).
|
||||
* **MySQL**: New rich query editor with syntax highlighting
|
||||
* **Prometheus**: New rich query editor with syntax highlighting, metric & range auto complete and integrated function docs. [#5117](https://github.com/grafana/grafana/issues/5117)
|
||||
|
||||
### Enhancements
|
||||
|
||||
* **GitHub OAuth**: Support for GitHub organizations with 100+ teams. [#8846](https://github.com/grafana/grafana/issues/8846), thx [@skwashd](https://github.com/skwashd)
|
||||
* **Graphite**: Calls to the Graphite API /metrics/find now include panel or dashboard time range (from & until) in most cases, [#8055](https://github.com/grafana/grafana/issues/8055)
|
||||
* **Graphite**: Added new graphite 1.0 functions, available if you set version to 1.0.x in data source settings. New Functions: mapSeries, reduceSeries, isNonNull, groupByNodes, offsetToZero, grep, weightedAverage, removeEmptySeries, aggregateLine, averageOutsidePercentile, delay, exponentialMovingAverage, fallbackSeries, integralByInterval, interpolate, invert, linearRegression, movingMin, movingMax, movingSum, multiplySeriesWithWildcards, pow, powSeries, removeBetweenPercentile, squareRoot, timeSlice, closes [#8261](https://github.com/grafana/grafana/issues/8261)
|
||||
- **Elasticsearch**: Ad-hoc filters now use query phrase match filters instead of term filters, works on non keyword/raw fields [#9095](https://github.com/grafana/grafana/issues/9095).
|
||||
|
||||
### Breaking change
|
||||
|
||||
* **InfluxDB/Elasticsearch**: The panel & data source option named "Group by time interval" is now named "Min time interval" and does now always define a lower limit for the auto group by time. Without having to use `>` prefix (that prefix still works). This should in theory have close to zero actual impact on existing dashboards. It does mean that if you used this setting to define a hard group by time interval of, say "1d", if you zoomed to a time range wide enough the time range could increase above the "1d" range as the setting is now always considered a lower limit.
|
||||
|
||||
This option is now renamed (and moved to the Options sub section above your queries):
|
||||

|
||||
|
||||
Data source selection, options & help are now above your metric queries.
|
||||

|
||||
|
||||
### Minor Changes
|
||||
|
||||
* **InfluxDB**: Change time range filter for absolute time ranges to be inclusive instead of exclusive [#8319](https://github.com/grafana/grafana/issues/8319), thx [@Oxydros](https://github.com/Oxydros)
|
||||
* **InfluxDB**: Added parentheses around tag filters in queries [#9131](https://github.com/grafana/grafana/pull/9131)
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
* **Modals**: Maintain scroll position after opening/leaving modal [#8800](https://github.com/grafana/grafana/issues/8800)
|
||||
* **Templating**: You cannot select data source variables as data source for other template variables [#7510](https://github.com/grafana/grafana/issues/7510)
|
||||
|
||||
@@ -11,14 +11,16 @@ parent = "http_api"
|
||||
|
||||
# Admin API
|
||||
|
||||
The admin http API does not currently work with an api token. Api Token's are currently only linked to an organization and organization role. They cannot given
|
||||
the permission of server admin, only user's can be given that permission. So in order to use these API calls you will have to use basic auth and Grafana user
|
||||
with Grafana admin permission.
|
||||
The Admin HTTP API does not currently work with an API Token. API Tokens are currently only linked to an organization and an organization role. They cannot be given
|
||||
the permission of server admin, only users can be given that permission. So in order to use these API calls you will have to use Basic Auth and the Grafana user
|
||||
must have the Grafana Admin permission. (The default admin user is called `admin` and has permission to use this API.)
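For example, a request using Basic Auth could look like this (the `admin:admin` credentials encoded below are just the default, shown for illustration):

```http
GET /api/admin/settings HTTP/1.1
Accept: application/json
Authorization: Basic YWRtaW46YWRtaW4=
```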
|
||||
|
||||
## Settings
|
||||
|
||||
`GET /api/admin/settings`
|
||||
|
||||
Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
GET /api/admin/settings
|
||||
@@ -176,6 +178,8 @@ with Grafana admin permission.
|
||||
|
||||
`GET /api/admin/stats`
|
||||
|
||||
Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
GET /api/admin/stats
|
||||
@@ -203,7 +207,7 @@ with Grafana admin permission.
|
||||
|
||||
`POST /api/admin/users`
|
||||
|
||||
Create new user
|
||||
Create new user. Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
@@ -229,7 +233,8 @@ Create new user
|
||||
|
||||
`PUT /api/admin/users/:id/password`
|
||||
|
||||
Change password for specific user
|
||||
Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.
|
||||
Change password for a specific user.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
@@ -250,6 +255,8 @@ Change password for specific user
|
||||
|
||||
`PUT /api/admin/users/:id/permissions`
|
||||
|
||||
Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /api/admin/users/2/permissions HTTP/1.1
|
||||
@@ -269,6 +276,8 @@ Change password for specific user
|
||||
|
||||
`DELETE /api/admin/users/:id`
|
||||
|
||||
Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
DELETE /api/admin/users/2 HTTP/1.1
|
||||
@@ -286,6 +295,8 @@ Change password for specific user
|
||||
|
||||
`POST /api/admin/pause-all-alerts`
|
||||
|
||||
Only works with Basic Authentication (username and password). See [introduction](http://docs.grafana.org/http_api/admin/#admin-api) for an explanation.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
POST /api/admin/pause-all-alerts HTTP/1.1
|
||||
|
||||
@@ -46,6 +46,7 @@ JSON Body schema:
|
||||
|
||||
- **200** – Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **412** – Precondition failed
|
||||
|
||||
The **412** status code is used when a newer dashboard already exists (newer, its version is greater than the version that was sent). The
|
||||
@@ -239,7 +240,7 @@ Get all tags of dashboards
|
||||
|
||||
`GET /api/search/`
|
||||
|
||||
Status Codes:
|
||||
Query parameters:
|
||||
|
||||
- **query** – Search Query
|
||||
- **tag** – Tag to use
|
||||
@@ -268,9 +269,3 @@ Status Codes:
|
||||
"isStarred":false
|
||||
}
|
||||
]
|
||||
|
||||
"email":"admin@mygraf.com",
|
||||
"login":"admin",
|
||||
"role":"Admin"
|
||||
}
|
||||
]
|
||||
|
||||
321
docs/sources/http_api/dashboard_versions.md
Normal file
@@ -0,0 +1,321 @@
+++
title = "Dashboard Versions HTTP API"
description = "Grafana Dashboard Versions HTTP API"
keywords = ["grafana", "http", "documentation", "api", "dashboard", "versions"]
aliases = ["/http_api/dashboardversions/"]
type = "docs"
[menu.docs]
name = "Dashboard Versions"
parent = "http_api"
+++

# Dashboard Versions

## Get all dashboard versions

Query parameters:

- **limit** - Maximum number of results to return
- **start** - Version to start from when returning queries

`GET /api/dashboards/id/:dashboardId/versions`

Gets all existing dashboard versions for the dashboard with the given `dashboardId`.

**Example request for getting all dashboard versions**:

```http
GET /api/dashboards/id/1/versions?limit=2&start=0 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
```

**Example Response**

```http
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 428

```

Status Codes:

- **200** - Ok
- **400** - Errors
- **401** - Unauthorized
- **404** - Dashboard version not found

## Get dashboard version

`GET /api/dashboards/id/:dashboardId/versions/:id`

Get the dashboard version with the given `id`, for the dashboard with the given `dashboardId`.

**Example request for getting a dashboard version**:

```http
GET /api/dashboards/id/1/versions/1 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
```

**Example response**:

```http
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 1300

```

Status Codes:

- **200** - Ok
- **401** - Unauthorized
- **404** - Dashboard version not found

## Restore dashboard

`POST /api/dashboards/id/:dashboardId/restore`

Restores a dashboard to a given dashboard version.

**Example request for restoring a dashboard version**:

```http
POST /api/dashboards/id/1/restore
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk

{
  "version": 1
}
```

JSON body schema:

- **version** - The dashboard version to restore to

**Example response**:

```http
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 67

{
  "slug": "my-dashboard",
  "status": "success",
  "version": 3
}
```

JSON response body schema:

- **slug** - the URL friendly slug of the dashboard's title
- **status** - whether the restoration was successful or not
- **version** - the new dashboard version, following the restoration

Status codes:

- **200** - OK
- **401** - Unauthorized
- **404** - Not found (dashboard not found or dashboard version not found)
- **500** - Internal server error (indicates issue retrieving dashboard tags from database)

**Example error response**

```http
HTTP/1.1 404 Not Found
Content-Type: application/json; charset=UTF-8
Content-Length: 46

{
  "message": "Dashboard version not found"
}
```

JSON response body schema:

- **message** - Message explaining the reason for the request failure.

## Compare dashboard versions

`POST /api/dashboards/calculate-diff`

Compares two dashboard versions by calculating the JSON diff of them.

**Example request**:

```http
POST /api/dashboards/calculate-diff HTTP/1.1
Accept: text/html
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk

{
  "base": {
    "dashboardId": 1,
    "version": 1
  },
  "new": {
    "dashboardId": 1,
    "version": 2
  },
  "diffType": "json"
}
```

JSON body schema:

- **base** - an object representing the base dashboard version
- **new** - an object representing the new dashboard version
- **diffType** - the type of diff to return. Can be "json" or "basic".

**Example response (JSON diff)**:

```http
HTTP/1.1 200 OK
Content-Type: text/html; charset=UTF-8

<p id="l1" class="diff-line diff-json-same">
  <!-- Diff omitted -->
</p>
```

The response is a textual representation of the diff, with the dashboard values being in JSON, similar to the diffs seen on sites like GitHub or GitLab.

Status Codes:

- **200** - Ok
- **400** - Bad request (invalid JSON sent)
- **401** - Unauthorized
- **404** - Not found

**Example response (basic diff)**:

```http
HTTP/1.1 200 OK
Content-Type: text/html; charset=UTF-8

<div class="diff-group">
  <!-- Diff omitted -->
</div>
```

The response here is a summary of the changes, derived from the diff between the two JSON objects.

Status Codes:

- **200** - OK
- **400** - Bad request (invalid JSON sent)
- **401** - Unauthorized
- **404** - Not found
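The endpoints above can be exercised quickly from a shell with curl and a bearer token. A minimal sketch, assuming a dashboard with id `1` exists and `$GRAFANA_TOKEN` holds an API key for its organization:

```bash
# List the two most recent versions of dashboard 1.
curl -H "Authorization: Bearer $GRAFANA_TOKEN" \
  "http://localhost:3000/api/dashboards/id/1/versions?limit=2&start=0"

# Get a basic (human readable) diff of version 1 against version 2.
curl -H "Authorization: Bearer $GRAFANA_TOKEN" -H "Content-Type: application/json" \
  -d '{"base":{"dashboardId":1,"version":1},"new":{"dashboardId":1,"version":2},"diffType":"basic"}' \
  http://localhost:3000/api/dashboards/calculate-diff

# Restore dashboard 1 to version 1.
curl -H "Authorization: Bearer $GRAFANA_TOKEN" -H "Content-Type: application/json" \
  -d '{"version": 1}' \
  http://localhost:3000/api/dashboards/id/1/restore
```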
@@ -137,7 +137,7 @@ parent = "http_api"

`POST /api/datasources`

**Example Request**:
**Example Graphite Request**:

POST /api/datasources HTTP/1.1
Accept: application/json

@@ -152,6 +152,28 @@ parent = "http_api"

  "basicAuth":false
}

**Example CloudWatch Request**:
```
POST /api/datasources HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk

{
  "name": "test_datasource",
  "type": "cloudwatch",
  "url": "http://monitoring.us-west-1.amazonaws.com",
  "access": "proxy",
  "jsonData": {
    "authType": "keys",
    "defaultRegion": "us-west-1"
  },
  "secureJsonData": {
    "accessKey": "Ol4pIDpeKSA6XikgOl4p",
    "secretKey": "dGVzdCBrZXkgYmxlYXNlIGRvbid0IHN0ZWFs"
  }
}
```
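As an aside, the same CloudWatch request can be issued from a shell before looking at the response below; a sketch, assuming `$GRAFANA_TOKEN` holds an API key with the Admin role for the organization (the key values are placeholders):

```bash
# Create the CloudWatch data source shown above via curl.
curl -X POST -H "Authorization: Bearer $GRAFANA_TOKEN" -H "Content-Type: application/json" \
  -d '{
    "name": "test_datasource",
    "type": "cloudwatch",
    "url": "http://monitoring.us-west-1.amazonaws.com",
    "access": "proxy",
    "jsonData": { "authType": "keys", "defaultRegion": "us-west-1" },
    "secureJsonData": { "accessKey": "<access key>", "secretKey": "<secret key>" }
  }' \
  http://localhost:3000/api/datasources
```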
**Example Response**:
@@ -52,6 +52,15 @@ parent = "http_api"

  "expires": 3600
}

JSON Body schema:

- **dashboard** – Required. The complete dashboard model.
- **name** – Optional. snapshot name
- **expires** - Optional. When the snapshot should expire in seconds. 3600 is 1 hour, 86400 is 1 day. Default is never to expire.
- **external** - Optional. Save the snapshot on an external server rather than locally. Default is `false`.
- **key** - Optional. Define the unique key. Required if **external** is `true`.
- **deleteKey** - Optional. Unique key used to delete the snapshot. It is different from the **key** so that only the creator can delete the snapshot. Required if **external** is `true`.

**Example Response**:

HTTP/1.1 200
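Taken together, the schema above maps directly onto a curl call against the snapshot create endpoint (`POST /api/snapshots`); a minimal sketch, assuming `$GRAFANA_TOKEN` holds an API key and `dashboard.json` contains the complete dashboard model:

```bash
# Create a snapshot that expires after one hour (3600 seconds).
curl -X POST -H "Authorization: Bearer $GRAFANA_TOKEN" -H "Content-Type: application/json" \
  -d "{\"dashboard\": $(cat dashboard.json), \"name\": \"hourly snapshot\", \"expires\": 3600}" \
  http://localhost:3000/api/snapshots
```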
@@ -20,9 +20,9 @@ parent = "http_api"

GET /api/users HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
Authorization: Basic YWRtaW46YWRtaW4=

Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`.
Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`. Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@@ -55,10 +55,12 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

GET /api/users/search?perpage=10&page=1&query=mygraf HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
Authorization: Basic YWRtaW46YWRtaW4=

Default value for the `perpage` parameter is `1000` and for the `page` parameter is `1`. The `totalCount` field in the response can be used for pagination of the user list. E.g. if `totalCount` is equal to 100 users and the `perpage` parameter is set to 10 then there are 10 pages of users. The `query` parameter is optional and it will return results where the query value is contained in one of the `name`, `login` or `email` fields. Query values with spaces need to be url encoded e.g. `query=Jane%20Doe`.

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

HTTP/1.1 200

@@ -94,7 +96,9 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

GET /api/users/1 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
Authorization: Basic YWRtaW46YWRtaW4=

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@@ -126,7 +130,9 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

GET /api/users/lookup?loginOrEmail=admin HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
Authorization: Basic YWRtaW46YWRtaW4=

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@@ -152,7 +158,7 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

PUT /api/users/2 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
Authorization: Basic YWRtaW46YWRtaW4=

{
  "email":"user@mygraf.com",

@@ -161,6 +167,8 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

  "theme":"light"
}

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

HTTP/1.1 200

@@ -178,7 +186,9 @@ Default value for the `perpage` parameter is `1000` and for the `page` parameter

GET /api/users/1/orgs HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
Authorization: Basic YWRtaW46YWRtaW4=

Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Response**:

@@ -246,11 +256,29 @@ Changes the password for the user

{"message":"User password changed"}

## Switch user context
## Switch user context for a specified user

`POST /api/user/using/:organisationId`
`POST /api/users/:userId/using/:organizationId`

Switch user context to the given organisation.
Switch user context to the given organization. Requires basic authentication and that the authenticated user is a Grafana Admin.

**Example Request**:

POST /api/users/7/using/2 HTTP/1.1
Authorization: Basic YWRtaW46YWRtaW4=

**Example Response**:

HTTP/1.1 200
Content-Type: application/json

{"message":"Active organization changed"}
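Because this endpoint requires basic authentication, a shell sketch looks slightly different from the token-based examples elsewhere; `admin:admin` and the ids below are placeholders:

```bash
# Switch user 7 into organization 2 as a Grafana Admin (basic auth).
curl -X POST -u admin:admin http://localhost:3000/api/users/7/using/2
```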
## Switch user context for signed in user

`POST /api/user/using/:organizationId`

Switch user context to the given organization.

**Example Request**:
@@ -15,6 +15,12 @@ weight = 1

The Grafana back-end has a number of configuration options that can be
specified in a `.ini` configuration file or specified using environment variables.

## Comments In .ini Files

Semicolons (the `;` char) are the standard way to comment out lines in a `.ini` file.

A common problem is forgetting to uncomment a line in the `custom.ini` (or `grafana.ini`) file which causes the configuration option to be ignored.

## Config file locations

- Default configuration from `$WORKING_DIR/conf/defaults.ini`

@@ -203,6 +209,12 @@ For MySQL, use either `true`, `false`, or `skip-verify`.

(MySQL only) The common name field of the certificate used by the `mysql` server. Not necessary if `ssl_mode` is set to `skip-verify`.

### max_idle_conn
The maximum number of connections in the idle connection pool.

### max_open_conn
The maximum number of open connections to the database.

<hr />

## [security]

@@ -240,13 +252,13 @@ Define a white list of allowed ips/domains to use in data sources. Format: `ip_o

### allow_sign_up

Set to `false` to prohibit users from being able to sign up / create
user accounts. Defaults to `true`. The admin user can still create
user accounts. Defaults to `false`. The admin user can still create
users from the [Grafana Admin Pages](../../reference/admin)

### allow_org_create

Set to `false` to prohibit users from creating new organizations.
Defaults to `true`.
Defaults to `false`.

### auto_assign_org

@@ -444,20 +456,29 @@ false only pre-existing Grafana users will be able to login (if ldap authenticat

<hr>

## [auth.proxy]

This feature allows you to handle authentication in a http reverse proxy.

### enabled

Defaults to `false`

### header_name

Defaults to X-WEBAUTH-USER

#### header_property

Defaults to username but can also be set to email

### auto_sign_up

Set to `true` to enable auto sign up of users who do not exist in Grafana DB. Defaults to `true`.

### whitelist

Limit where auth proxy requests come from by configuring a list of IP addresses. This can be used to prevent users spoofing the X-WEBAUTH-USER header.

<hr>
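For reference, a minimal sketch of what the `[auth.proxy]` options above look like in an ini file, written from a shell; the file path and every value here are illustrative only:

```bash
# Append an illustrative [auth.proxy] fragment to a custom config file.
cat >> conf/custom.ini <<'EOF'
[auth.proxy]
enabled = true
header_name = X-WEBAUTH-USER
header_property = username
auto_sign_up = true
whitelist = 192.168.1.1, 192.168.2.1
EOF
```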
## [session]
@@ -15,7 +15,8 @@ weight = 1

Description | Download
------------ | -------------
Stable for Debian-based Linux | [grafana_4.3.0_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.3.0_amd64.deb)
Stable for Debian-based Linux | [grafana_4.4.3_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb)
Beta for Debian-based Linux | [grafana_4.5.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.

@@ -23,20 +24,18 @@ installation.

## Install Stable

```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.2.0_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.4.3_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_4.2.0_amd64.deb
sudo dpkg -i grafana_4.4.3_amd64.deb
```

<!--
## Install Beta
## Install Latest Beta

```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.3.0-beta1_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_4.3.0-beta1_amd64.deb
sudo dpkg -i grafana_4.5.0-beta1_amd64.deb
```
-->

## APT Repository

@@ -81,6 +80,7 @@ sudo apt-get install -y apt-transport-https

- Installs systemd service (if systemd is available) named `grafana-server.service`
- The default configuration sets the log file at `/var/log/grafana/grafana.log`
- The default configuration specifies an sqlite3 db at `/var/lib/grafana/grafana.db`
- Installs HTML/JS/CSS and other Grafana files at `/usr/share/grafana`

## Start the server (init.d service)
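A short sketch of the usual start commands for the package installed above (the service name `grafana-server` comes from the package; pick the variant matching your init system):

```bash
# init.d based systems
sudo service grafana-server start

# systemd based systems
sudo systemctl daemon-reload
sudo systemctl start grafana-server
sudo systemctl enable grafana-server.service   # start at boot
```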
@@ -14,7 +14,7 @@ weight = 4

Grafana is very easy to install and run using the official Docker container.

$ docker run -i -p 3000:3000 grafana/grafana
$ docker run -d -p 3000:3000 grafana/grafana

All Grafana configuration settings can be defined using environment
variables; this is especially useful when using the above container.
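Grafana reads configuration overrides from environment variables of the form `GF_<SECTION>_<KEY>`, which pairs naturally with `docker run -e`; a minimal sketch with illustrative values:

```bash
# Run detached, expose the UI port, and override two config options.
docker run -d -p 3000:3000 \
  -e "GF_SERVER_ROOT_URL=http://grafana.example.com" \
  -e "GF_SECURITY_ADMIN_PASSWORD=secret" \
  grafana/grafana
```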
@@ -7,6 +7,7 @@ aliases = ["installation/installation/", "v2.1/installation/install/"]

[menu.docs]
name = "Installation"
identifier = "installation"
weight = 1
+++

## Installing Grafana
@@ -15,7 +15,8 @@ weight = 2

Description | Download
------------ | -------------
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.3.0 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.0-1.x86_64.rpm)
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.4.3 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm)
Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-beta1.x86_64.rpm)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.

@@ -24,19 +25,19 @@ installation.

You can install Grafana using Yum directly.

$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0-1.x86_64.rpm
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm

Or install manually using `rpm`.

#### On CentOS / Fedora / Redhat:

$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.2.0-1.x86_64.rpm
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3-1.x86_64.rpm
$ sudo yum install initscripts fontconfig
$ sudo rpm -Uvh grafana-4.2.0-1.x86_64.rpm
$ sudo rpm -Uvh grafana-4.4.3-1.x86_64.rpm

#### On OpenSuse:

$ sudo rpm -i --nodeps grafana-4.2.0-1.x86_64.rpm
$ sudo rpm -i --nodeps grafana-4.4.3-1.x86_64.rpm
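After installing the rpm, a quick sanity check is to start the service and confirm the default port answers; a sketch assuming a systemd-based distribution:

```bash
sudo systemctl start grafana-server
sudo systemctl status grafana-server
curl -I http://localhost:3000/login   # expect an HTTP 200 from the login page
```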
## Install via YUM Repository
@@ -11,13 +11,18 @@ weight = 8

# Troubleshooting

## visualization & query issues
## Visualization & Query issues

{{< imgbox max-width="40%" img="/img/docs/v45/query_inspector.png" caption="Query Inspector" >}}

The most common problems are related to the query & response from your data source. Even if it looks
like a bug or visualization issue in Grafana, it is 99% of the time a problem with the data source query or
the data source response.

So make sure to check the query sent and the raw response, learn how in this guide: [How to troubleshoot metric query issues](https://community.grafana.com/t/how-to-troubleshoot-metric-query-issues/50)
To check this you should use the Query Inspector (new in Grafana v4.5). The Query Inspector shows query requests and responses.

For more on the Query Inspector read [this guide here](https://community.grafana.com/t/using-grafanas-query-inspector-to-troubleshoot-issues/2630). For
older versions of Grafana, read the [how to troubleshoot metric query issues](https://community.grafana.com/t/how-to-troubleshoot-metric-query-issues/50/2) article.

## Logging
@@ -13,7 +13,7 @@ weight = 3

Description | Download
------------ | -------------
Latest stable package for Windows | [grafana.4.3.0.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.0.windows-x64.zip)
Latest stable package for Windows | [grafana.4.4.3.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.4.3.windows-x64.zip)

Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
installation.

@@ -27,7 +27,7 @@ this folder to anywhere you want Grafana to run from. Go into the

The default Grafana port is `3000`; this port requires extra permissions
on Windows. Edit `custom.ini` and uncomment the `http_port`
configuration option and change it to something like `8080` or similar.
configuration option (`;` is the comment character in ini files) and change it to something like `8080` or similar.
That port should not require extra Windows privileges.

Start Grafana by executing `grafana-server.exe`, preferably from the
@@ -15,14 +15,21 @@ dev environment. Grafana ships with its own required backend server; also comple

- [Go 1.8.1](https://golang.org/dl/)
- [NodeJS LTS](https://nodejs.org/download/)
- [Git](https://git-scm.com/downloads)

## Get Code
Create a directory for the project and set your path accordingly. Then download and install Grafana into your $GOPATH directory
Create a directory for the project and set your path accordingly (or use the [default Go workspace directory](https://golang.org/doc/code.html#GOPATH)). Then download and install Grafana into your $GOPATH directory:

```
export GOPATH=`pwd`
go get github.com/grafana/grafana
```

On Windows use setx instead of export and then restart your command prompt:
```
setx GOPATH %cd%
```

You may see an error such as: `package github.com/grafana/grafana: no buildable Go source files`. This is just a warning, and you can proceed with the directions.

## Building the backend

@@ -36,6 +43,12 @@ go run build.go build # (or 'go build ./pkg/cmd/grafana-server')

The Grafana backend includes Sqlite3 which requires GCC to compile. So in order to compile Grafana on Windows you need
to install GCC. We recommend [TDM-GCC](http://tdm-gcc.tdragon.net/download).

[node-gyp](https://github.com/nodejs/node-gyp#installation) is the Node.js native addon build tool and it requires extra dependencies to be installed on Windows. In a command prompt which is run as administrator, run:

```
npm --add-python-to-path='true' --debug install --global windows-build-tools
```

## Build the Front-end Assets

To build less to css for the frontend you will need a recent version of node (v0.12.0),

@@ -55,6 +68,8 @@ go get github.com/Unknwon/bra

bra run
```

If the `bra run` command does not work, make sure that the bin directory in your Go workspace ($GOPATH/bin, or %GOPATH%\bin on Windows) is in your path.

## Running Grafana Locally
You can run a local instance of Grafana by running:
```

@@ -94,3 +109,24 @@ Learn more about Grafana config options in the [Configuration section](/installa

## Create a pull request
Please contribute to the Grafana project and submit a pull request! Build new features, write or update documentation, fix bugs and generally make Grafana even more awesome.

## Troubleshooting

**Problem**: PhantomJS or node-sass errors when running grunt

**Solution**: delete the node_modules directory. Install [node-gyp](https://github.com/nodejs/node-gyp#installation) properly for your platform. Then run `yarn install --pure-lockfile` again.
<br><br>

**Problem**: When running `bra run` for the first time you get an error that it is not a recognized command.

**Solution**: Add the bin directory in your Go workspace directory to the path. Per default this is `$HOME/go/bin` on Linux and `%USERPROFILE%\go\bin` on Windows, or `$GOPATH/bin` (`%GOPATH%\bin` on Windows) if you have set your own workspace directory.
<br><br>

**Problem**: When executing a `go get` command on Windows you get an error about the git repository not existing.

**Solution**: `go get` requires Git. If you run `go get` without Git then it will create an empty directory in your Go workspace for the library you are trying to get. Even after installing Git, you will get a similar error. To fix this, delete the empty directory (for example: if you tried to run `go get github.com/Unknwon/bra` then delete `%USERPROFILE%\go\src\github.com\Unknwon\bra`) and run the `go get` command again.
<br><br>

**Problem**: On Windows, getting errors about a tool not being installed even though you just installed that tool.

**Solution**: It is usually because the tool was just added to the path, and you have to restart your command prompt before it is picked up.
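Pulling the steps above together, a first build on Linux or macOS looks roughly like the sketch below; it only uses commands mentioned in this guide, and the workspace path is an assumption:

```bash
# Fetch the source into a Go workspace.
export GOPATH=$HOME/go
go get github.com/grafana/grafana
cd $GOPATH/src/github.com/grafana/grafana

# Build the backend.
go run build.go build        # or: go build ./pkg/cmd/grafana-server

# Install frontend dependencies and build assets.
yarn install --pure-lockfile
npm run build                # runs grunt, per package.json

# Rebuild the backend automatically on source changes.
go get github.com/Unknwon/bra
bra run
```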
@@ -17,7 +17,7 @@ you can get title, tags, and text information for the event.

## Queries

Annotatation events are fetched via annotation queries. To add a new annotation query to a dashboard
Annotation events are fetched via annotation queries. To add a new annotation query to a dashboard
open the dashboard settings menu, then select `Annotations`. This will open the dashboard annotations
settings view. To create a new annotation query hit the `New` button.
@@ -65,7 +65,7 @@ Each field in the dashboard JSON is explained below with its usage:

| **timezone** | timezone of dashboard, i.e. `utc` or `browser` |
| **editable** | whether a dashboard is editable or not |
| **hideControls** | whether row controls on the left in green are hidden or not |
| **graphTooltip** | TODO |
| **graphTooltip** | 0 for no shared crosshair or tooltip (default), 1 for shared crosshair, 2 for shared crosshair AND shared tooltip |
| **rows** | row metadata, see [rows section](#rows) for details |
| **time** | time range for dashboard, i.e. last 6 hours, last 7 days, etc |
| **timepicker** | timepicker metadata, see [timepicker section](#timepicker) for details |
40
docs/sources/reference/dashboard_history.md
Normal file
@@ -0,0 +1,40 @@
+++
title = "Dashboard Version History"
keywords = ["grafana", "dashboard", "documentation", "version", "history"]
type = "docs"
[menu.docs]
name = "Dashboard Version History"
parent = "dashboard_features"
weight = 100
+++

# Dashboard Version History

Whenever you save a version of your dashboard, a copy of that version is saved so that previous versions of your dashboard are never lost. A list of these versions is available by clicking the dashboard menu dropdown, and clicking "Version history".

<img class="no-shadow" src="/img/docs/v4/dashboard_versions_list.png">

The dashboard version history feature lets you compare and restore to previously saved dashboard versions.

## Comparing two dashboard versions

To compare two dashboard versions, select the two versions from the list that you wish to compare. Once selected, the "Compare versions" button will become clickable. Click the button to view the diff between the two versions.

<img class="no-shadow" src="/img/docs/v4/dashboard_versions_select.png">

Upon clicking the button, you'll be brought to the diff view. By default, you'll see a textual summary of the changes, like in the image below.

<img class="no-shadow" src="/img/docs/v4/dashboard_versions_diff_basic.png">

If you want to view the diff of the raw JSON that represents your dashboard, you can do that as well by clicking the "JSON Diff" tab on the left.

If you want to restore to the version you are diffing against, you can do so by clicking the "Restore to version <x>" button in the top right.

## Restoring to a previously saved dashboard version

If you need to restore to a previously saved dashboard version, you can do so by either clicking the "Restore" button on the right of a row in the dashboard version list, or by clicking the "Restore to version <x>" button appearing in the diff view. Clicking the button will bring up the following popup prompting you to confirm the restoration.

<img class="no-shadow" src="/img/docs/v4/dashboard_versions_restore.png">

After restoring to a previous version, a new version will be created containing the same exact data as the previous version, only with a different version number. This is indicated in the "Notes column" for the row in the new dashboard version. This is done simply to ensure your previous dashboard versions are not affected by the change.
@@ -16,7 +16,7 @@ Since Grafana automatically scales Dashboards to any resolution they're perfect

## Creating a Playlist

{{< docs-imagebox img="/img/docs/v3/playlist.png" max-width="25rem" >}}
{{< docs-imagebox img="/img/docs/v3/playlist.png" max-width="25rem" class="docs-image--right">}}

The Playlist feature can be accessed from Grafana's sidemenu, in the Dashboard submenu.

@@ -43,3 +43,25 @@ Playlists can also be manually controlled utilizing the Playlist controls at the

Click the stop button to stop the Playlist, and exit to the current Dashboard.
Click the next button to advance to the next Dashboard in the Playlist.
Click the back button to rewind to the previous Dashboard in the Playlist.

## TV or Kiosk Mode

In TV mode the top navbar, row & panel controls will all fade to transparent.

This happens automatically after one minute of user inactivity but can also be toggled manually
with the `d v` sequence shortcut. Any mouse movement or keyboard action will
restore navbar & controls.

Another feature is the kiosk mode - in kiosk mode the navbar is completely hidden/removed from view. This can be enabled with the `d k`
shortcut.

To put a playlist into kiosk mode, use the `d k` shortcut after the playlist has started. The same shortcut will toggle the playlist out of kiosk mode.

### Linking to the Playlist in Kiosk Mode

If you want to create a link to the playlist with kiosk mode enabled:

1. Copy the Start Url (by right clicking on the Play button and choosing Copy link address).
2. Add the `?kiosk` parameter to the url.

For example, to open the first playlist on the Grafana Play site in kiosk mode: [http://play.grafana.org/playlists/play/1?kiosk](http://play.grafana.org/playlists/play/1?kiosk)
@@ -8,10 +8,12 @@ weight = 6

+++

# Sharing features

Grafana provides a number of ways to share a dashboard or a specific panel to other users within your
organization. It also provides ways to publish interactive snapshots that can be accessed by external partners.

## Share dashboard

Share a dashboard via the share icon in the top nav. This opens the share dialog where you
can get a link to the current dashboard with the current selected time range and template variables. If you have
made changes to the dashboard, make sure those are saved before sending the link.

@@ -25,18 +27,35 @@ snapshots can be accessed by anyone who has the link and can reach the URL.



### Publish snapshots

You can publish snapshots to your local instance or to [snapshot.raintank.io](http://snapshot.raintank.io). The latter is a free service
that is provided by [Raintank](http://raintank.io) that allows you to publish dashboard snapshots to an external Grafana instance.
The same rules still apply, anyone with the link can view it. You can set an expiration time if you want the snapshot to be removed
after a certain time period.

## Share Panel
Click a panel title to open the panel menu, then click share in the panel menu to open the Share Panel dialog. Here you
have access to a link that will take you to exactly this panel with the current time range and selected template variables.
You also get a link to service side rendered PNG of the panel. Useful if you want to share an image of the panel.
Please note that for OSX and Windows, you will need to ensure that a `phantomjs` binary is available under `vendor/phantomjs/phantomjs`. For Linux, a `phantomjs` binary is included - however, you should ensure that any requisite libraries (e.g. libfontconfig) are available.

Click a panel title to open the panel menu, then click share in the panel menu to open the Share Panel dialog. Here you have access to a link that will take you to exactly this panel with the current time range and selected template variables.

### Direct Link Rendered Image

You also get a link to a server-side rendered PNG of the panel. Useful if you want to share an image of the panel. Please note that for OSX and Windows, you will need to ensure that a `phantomjs` binary is available under `vendor/phantomjs/phantomjs`. For Linux, a `phantomjs` binary is included - however, you should ensure that any requisite libraries (e.g. libfontconfig) are available.

Example of a link to a server-side rendered PNG:

```
http://play.grafana.org/render/dashboard-solo/db/grafana-play-home?orgId=1&panelId=4&from=1499272191563&to=1499279391563&width=1000&height=500&tz=UTC%2B02%3A00&timeout=5000
```

#### Query String Parameters For Server-Side Rendered Images

- **width**: width in pixels. Default is 800.
- **height**: height in pixels. Default is 400.
- **tz**: timezone in the format `UTC%2BHH%3AMM` where HH and MM are offset in hours and minutes after UTC
- **timeout**: number of seconds. The timeout can be increased if the query for the panel needs more than the default 30 seconds.
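The render URL above can also be fetched non-interactively, for example from a job that saves panel images for a report; a sketch, assuming `$GRAFANA_TOKEN` holds an API key for the instance you are rendering from:

```bash
# Save a server-side rendered PNG of panel 4 to disk.
curl -H "Authorization: Bearer $GRAFANA_TOKEN" -o panel.png \
  "http://play.grafana.org/render/dashboard-solo/db/grafana-play-home?orgId=1&panelId=4&width=1000&height=500&timeout=5000"
```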
### Embed Panel

You can embed a panel using an iframe on another web site. This tab will show you the html that you need to use.

Example:

@@ -46,4 +65,16 @@ Example:

```

Below there should be an interactive Grafana graph embedded in an iframe:

<iframe src="https://snapshot.raintank.io/dashboard-solo/snapshot/y7zwi2bZ7FcoTlB93WN7yWO4aMiz3pZb?from=1493369923321&to=1493377123321&panelId=4" width="650" height="300" frameborder="0"></iframe>

### Export Panel Data



The submenu for a panel can be found by clicking on the title of a panel and then on the hamburger (three horizontal lines) submenu on the left of the context menu.

This menu contains two options for exporting data:

- The panel JSON (the specification and not the data) can be exported or updated via the panel context menu.
- Panel data can be exported in the CSV format for Table and Graph Panels.
@@ -88,7 +88,7 @@ The query expressions are different for each data source.

- [Elasticsearch templating queries]({{< relref "features/datasources/elasticsearch.md#templating" >}})
- [InfluxDB templating queries]({{< relref "features/datasources/influxdb.md#templating" >}})
- [Prometheus templating queries]({{< relref "features/datasources/prometheus.md#templating" >}})
- [OpenTSDB templating queries]({{< relref "features/datasources/prometheus.md#templating" >}})
- [OpenTSDB templating queries]({{< relref "features/datasources/opentsdb.md#templating" >}})

One thing to note is that query expressions can contain references to other variables and in effect create linked variables.
Grafana will detect this and automatically refresh a variable when one of the variables it references changes.

@@ -97,7 +97,7 @@ Grafana will detect this and automatically refresh a variable when one of it's c

Option | Description
------- | --------
*Mulit-value* | If enabled, the variable will support the selection of multiple options at the same time.
*Multi-value* | If enabled, the variable will support the selection of multiple options at the same time.
*Include All option* | Add a special `All` option whose value includes all options.
*Custom all value* | By default the `All` value will include all options in combined expression. This can become very long and can have performance problems. Many times it can be better to specify a custom all value, like a wildcard regex. To make it possible to have custom regex, globs or lucene syntax in the **Custom all value** option it is never escaped, so you will have to think about what is a valid value for your data source.

@@ -141,6 +141,46 @@ Use the `Interval` type to create a variable that represents a time span (eg. `1

This variable type is useful as a parameter to group by time (for InfluxDB), Date histogram interval (for Elasticsearch) or as a *summarize* function parameter (for Graphite).

Example using the template variable `myinterval` of type `Interval` in a Graphite function:

```
summarize($myinterval, sum, false)
```

## Global Built-in Variables

Grafana has global built-in variables that can be used in expressions in the query editor.

### The $__interval Variable

The $__interval variable is similar to the `auto` interval variable that is described above. It can be used as a parameter to group by time (for InfluxDB), Date histogram interval (for Elasticsearch) or as a *summarize* function parameter (for Graphite).

Grafana automatically calculates an interval that can be used to group by time in queries. When there are more data points than can be shown on a graph then queries can be made more efficient by grouping by a larger interval. It is more efficient to group by 1 day than by 10s when looking at 3 months of data and the graph will look the same and the query will be faster. The `$__interval` is calculated using the time range and the width of the graph (the number of pixels).

Approximate Calculation: `(to - from) / resolution`

For example, when the time range is 1 hour and the graph is full screen, then the interval might be calculated to `2m` - points are grouped in 2 minute intervals. If the time range is 6 months and the graph is full screen, then the interval might be `1d` (1 day) - points are grouped by day.

In the InfluxDB data source, the legacy variable `$interval` is the same variable. `$__interval` should be used instead.

The InfluxDB and Elasticsearch data sources have `Group by time interval` fields that are used to hard code the interval or to set the minimum limit for the `$__interval` variable (by using the `>` syntax -> `>10m`).

### The $__interval_ms Variable

This variable is the `$__interval` variable in milliseconds (and not a time interval formatted string). For example, if the `$__interval` is `20m` then the `$__interval_ms` is `1200000`.

### The $timeFilter or $__timeFilter Variable

The `$timeFilter` variable returns the currently selected time range as an expression. For example, the time range interval `Last 7 days` expression is `time > now() - 7d`.

This is used in the WHERE clause for the InfluxDB data source. Grafana adds it automatically to InfluxDB queries when in Query Editor Mode. It has to be added manually in Text Editor Mode: `WHERE $timeFilter`.

The `$__timeFilter` is used in the MySQL data source.

### The $__name Variable

This variable is only available in the Singlestat panel and can be used in the prefix or suffix fields on the Options tab. The variable will be replaced with the series name or alias.

## Repeating Panels

Template variables can be very useful to dynamically change your queries across a whole dashboard. If you want
74
docs/sources/tutorials/api_org_token_howto.md
Normal file
@@ -0,0 +1,74 @@
+++
title = "API Tutorial: How To Create API Tokens And Dashboards For A Specific Organization"
type = "docs"
keywords = ["grafana", "tutorials", "API", "Token", "Org", "Organization"]
[menu.docs]
parent = "tutorials"
weight = 10
+++

# API Tutorial: How To Create API Tokens And Dashboards For A Specific Organization

A common scenario is wanting to use the Grafana API to set up new Grafana organizations or to add dynamically generated dashboards to an existing organization.

## Authentication

There are two ways to authenticate against the API: basic authentication and API Tokens.

Some parts of the API are only available through basic authentication and these parts of the API usually require that the user is a Grafana Admin. But all organization actions are accessed via an API Token. An API Token is tied to an organization and can be used to create dashboards etc. but only for that organization.

## How To Create A New Organization and an API Token

The task is to create a new organization and then add a Token that can be used by other users. In the examples below which use basic auth, the user is `admin` and the password is `admin`.

1. [Create the org](http://docs.grafana.org/http_api/org/#create-organisation). Here is an example using curl:
```
curl -X POST -H "Content-Type: application/json" -d '{"name":"apiorg"}' http://admin:admin@localhost:3000/api/orgs
```

This should return a response: `{"message":"Organization created","orgId":6}`. Use the orgId for the next steps.

2. Optional step. If the org was created previously and/or step 3 fails, then first [add your Admin user to the org](http://docs.grafana.org/http_api/org/#add-user-in-organisation):
```
curl -X POST -H "Content-Type: application/json" -d '{"loginOrEmail":"admin", "role": "Admin"}' http://admin:admin@localhost:3000/api/orgs/<org id of new org>/users
```

3. [Switch the org context for the Admin user to the new org](http://docs.grafana.org/http_api/user/#switch-user-context):
```
curl -X POST http://admin:admin@localhost:3000/api/user/using/<id of new org>
```

4. [Create the API token](http://docs.grafana.org/http_api/auth/#create-api-key):
```
curl -X POST -H "Content-Type: application/json" -d '{"name":"apikeycurl", "role": "Admin"}' http://admin:admin@localhost:3000/api/auth/keys
```

This should return a response: `{"name":"apikeycurl","key":"eyJrIjoiR0ZXZmt1UFc0OEpIOGN5RWdUalBJTllUTk83VlhtVGwiLCJuIjoiYXBpa2V5Y3VybCIsImlkIjo2fQ=="}`.

Save the key returned here in your password manager as it is not possible to fetch it again in the future.

## How To Add A Dashboard

Using the Token that was created in the previous step, you can create a dashboard or carry out other actions without having to switch organizations.

1. [Add a dashboard](http://docs.grafana.org/http_api/dashboard/#create-update-dashboard) using the key (or bearer token as it is also called):

```
curl -X POST --insecure -H "Authorization: Bearer eyJrIjoiR0ZXZmt1UFc0OEpIOGN5RWdUalBJTllUTk83VlhtVGwiLCJuIjoiYXBpa2V5Y3VybCIsImlkIjo2fQ==" -H "Content-Type: application/json" -d '{
  "dashboard": {
    "id": null,
    "title": "Production Overview",
    "tags": [ "templated" ],
    "timezone": "browser",
    "rows": [
      {
      }
    ],
    "schemaVersion": 6,
    "version": 0
  },
  "overwrite": false
}' http://localhost:3000/api/dashboards/db
```

This import will not work if you exported the dashboard via the Share -> Export menu in the Grafana UI (it strips out data source names etc.). View the JSON and save it to a file instead, or fetch the dashboard JSON via the API.
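To confirm the dashboard landed in the intended organization, it can be read back with the same token; a sketch, where the `production-overview` slug is assumed to be what Grafana derived from the title above:

```bash
# Read the dashboard back using the org-scoped API token.
curl -H "Authorization: Bearer eyJrIjoiR0ZXZmt1UFc0OEpIOGN5RWdUalBJTllUTk83VlhtVGwiLCJuIjoiYXBpa2V5Y3VybCIsImlkIjo2fQ==" \
  http://localhost:3000/api/dashboards/db/production-overview
```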
@@ -35,6 +35,4 @@ But we suggest that you store the session in redis/memcache since it makes it ea

## Alerting

Currently alerting does not support high availability. But this is something that we will be working on in the future.

Currently alerting supports a limited form of high availability. Since v4.2.0 of Grafana, alert notifications are deduped when running multiple servers. This means all alerts are executed on every server but no duplicate alert notifications are sent due to the deduping logic. Proper load balancing of alerts will be introduced in the future.
@@ -74,7 +74,9 @@ If you do not get an image when opening this link verify that the required font

### Grafana API Key

<img src="/img/docs/v2/orgdropdown_api_keys.png" style="width: 150px" class="right"></img>
<!--<img src="/img/docs/v2/orgdropdown_api_keys.png" style="width: 150px" class="right"></img>-->
{{< docs-imagebox img="/img/docs/v2/orgdropdown_api_keys.png" max-width="150px" class="docs-image--right">}}

You need to set the environment variable `HUBOT_GRAFANA_API_KEY` to a Grafana API Key.
You can add these from the API Keys page which you find in the Organization dropdown.
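A one-line sketch of wiring that key into the bot's environment before it starts (the key value is a placeholder; create a real one on the API Keys page):

```bash
export HUBOT_GRAFANA_API_KEY=eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
```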
@@ -1,4 +1,4 @@

{
  "stable": "4.2.0",
  "testing": "4.2.0"
  "stable": "4.4.1",
  "testing": "4.4.1"
}
14
package.json
@@ -1,10 +1,10 @@
{
  "author": {
    "name": "Torkel Ödegaard",
    "company": "Coding Instinct AB"
    "company": "Grafana Labs"
  },
  "name": "grafana",
  "version": "4.3.1",
  "version": "4.5.2",
  "repository": {
    "type": "git",
    "url": "http://github.com/grafana/grafana.git"

@@ -33,7 +33,7 @@

    "grunt-ng-annotate": "^3.0.0",
    "grunt-notify": "^0.4.5",
    "grunt-postcss": "^0.8.0",
    "grunt-sass": "^1.2.1",
    "grunt-sass": "^2.0.0",
    "grunt-string-replace": "~1.3.1",
    "grunt-systemjs-builder": "^0.2.7",
    "grunt-usemin": "3.1.1",

@@ -53,16 +53,14 @@

    "systemjs": "0.19.41",
    "zone.js": "^0.7.2"
  },
  "engines": {
    "node": "4.x",
    "npm": "2.14.x"
  },
  "scripts": {
    "build": "./node_modules/grunt-cli/bin/grunt",
    "test": "./node_modules/grunt-cli/bin/grunt test"
    "test": "./node_modules/grunt-cli/bin/grunt test",
    "dev": "./node_modules/grunt-cli/bin/grunt && ./node_modules/grunt-cli/bin/grunt watch"
  },
  "license": "Apache-2.0",
  "dependencies": {
    "ace-builds": "^1.2.8",
    "eventemitter3": "^2.0.2",
    "gaze": "^1.1.2",
    "grunt-jscs": "3.0.1",
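The new `dev` script above builds once and then watches for changes; a sketch of the usual invocation from a fresh checkout (yarn is what the build docs in this repo use for installing frontend dependencies):

```bash
yarn install --pure-lockfile
npm run dev    # grunt build followed by grunt watch, per the "dev" script above
```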
@@ -6,6 +6,7 @@ set -e

IS_UPGRADE=false


case "$1" in
  configure)
  [ -z "$GRAFANA_USER" ] && GRAFANA_USER="grafana"
@@ -17,3 +17,6 @@ CONF_FILE=/etc/grafana/grafana.ini

RESTART_ON_UPGRADE=true

PLUGINS_DIR=/var/lib/grafana/plugins

# Only used on systemd systems
PID_FILE_DIR=/var/run/grafana
@@ -12,11 +12,13 @@ Group=grafana

Type=simple
Restart=on-failure
WorkingDirectory=/usr/share/grafana
ExecStart=/usr/sbin/grafana-server \
  --config=${CONF_FILE} \
  --pidfile=${PID_FILE} \
  cfg:default.paths.logs=${LOG_DIR} \
  cfg:default.paths.data=${DATA_DIR} \
RuntimeDirectory=grafana
RuntimeDirectoryMode=0750
ExecStart=/usr/sbin/grafana-server \
  --config=${CONF_FILE} \
  --pidfile=${PID_FILE_DIR}/grafana-server.pid \
  cfg:default.paths.logs=${LOG_DIR} \
  cfg:default.paths.data=${DATA_DIR} \
  cfg:default.paths.plugins=${PLUGINS_DIR}
LimitNOFILE=10000
TimeoutStopSec=20
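When a package upgrade replaces this unit file, systemd needs to re-read it before the new `ExecStart` takes effect; a short sketch of the usual steps:

```bash
sudo systemctl daemon-reload
sudo systemctl restart grafana-server
sudo systemctl status grafana-server
```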
@@ -1,13 +1,15 @@

#! /usr/bin/env bash

version=4.3.0
version=4.5.1

wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb

package_cloud push grafana/stable/debian/jessie grafana_${version}_amd64.deb
package_cloud push grafana/stable/debian/wheezy grafana_${version}_amd64.deb
package_cloud push grafana/stable/debian/stretch grafana_${version}_amd64.deb

package_cloud push grafana/testing/debian/jessie grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/wheezy grafana_${version}_amd64.deb
package_cloud push grafana/testing/debian/stretch grafana_${version}_amd64.deb

wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-${version}-1.x86_64.rpm

@@ -16,3 +18,5 @@ package_cloud push grafana/testing/el/7 grafana-${version}-1.x86_64.rpm

package_cloud push grafana/stable/el/7 grafana-${version}-1.x86_64.rpm
package_cloud push grafana/stable/el/6 grafana-${version}-1.x86_64.rpm

rm grafana*.{deb,rpm}
Some files were not shown because too many files have changed in this diff.