Compare commits

930 Commits

Author SHA1 Message Date
Torkel Ödegaard
700a3f9f84 Merge branch 'master' of github.com:grafana/grafana 2015-12-04 15:45:06 +01:00
Torkel Ödegaard
f9b13791de change(table panel): changed default transform mode 2015-12-04 15:38:49 +01:00
Torkel Ödegaard
7886318341 Merge pull request #3417 from mtanda/cloudwatch_dimension_filter_fix
(cloudwatch) null check of dimension
2015-12-04 12:30:13 +01:00
Mitsuhiro Tanda
573632012e cloudwatch dimension null check 2015-12-04 20:16:31 +09:00
Torkel Ödegaard
002455da07 fix(table): minor fix to table panel and transform time series to aggregations 2015-12-04 12:15:42 +01:00
Torkel Ödegaard
e9e6ac64bd minor change to build script 2015-12-04 11:21:38 +01:00
Torkel Ödegaard
36ab8ae19c fix(table): minor fix for table panel 2015-12-04 10:44:17 +01:00
Torkel Ödegaard
df366da721 updated version to 2.6.0-beta1 2015-12-04 10:39:50 +01:00
Torkel Ödegaard
67dc761344 fix(security): do not print ENV config values when they are passwords, fixes #3337 2015-12-04 10:38:27 +01:00
Torkel Ödegaard
0b4552a8e7 fix(timerange): fix handling of invalid dates in from/to url parameters, fixes #3345 2015-12-04 10:32:23 +01:00
Torkel Ödegaard
2345b41a74 feat(elasticsearch): added min_doc_count option for date histogram, closes #3416 2015-12-04 10:06:44 +01:00
Torkel Ödegaard
141e395489 polish(influxdb): minor improvements to influxdb editor raw query editor 2015-12-04 09:20:29 +01:00
Torkel Ödegaard
db9c288050 fix(elasticsearch): refactoring of #3321 2015-12-03 18:30:36 +01:00
Torkel Ödegaard
f1cb8f2f25 Merge branch 'keep_es_version' of https://github.com/replay/grafana into replay-keep_es_version 2015-12-03 18:08:26 +01:00
Torkel Ödegaard
7ec2c09249 Merge branch 'master' of github.com:grafana/grafana 2015-12-03 18:05:19 +01:00
Torkel Ödegaard
354bfcea15 Merge branch 'master' of https://github.com/skbkontur/grafana 2015-12-03 18:03:55 +01:00
Torkel Ödegaard
891d7c134c Merge pull request #3205 from mtanda/cloudwatch_suggest_fix
(cloudwatch) fix dimension value suggestion
2015-12-03 17:17:07 +01:00
Mitsuhiro Tanda
eb5822bc7c update cloudwatch docs 2015-12-04 01:04:50 +09:00
Mitsuhiro Tanda
154d70e4e2 remove dimensionPart 2015-12-04 01:02:25 +09:00
Torkel Ödegaard
0011beb654 Merge branch 'master' of github.com:grafana/grafana 2015-12-03 16:32:58 +01:00
Torkel Ödegaard
419251ed35 fix(elasticsearch): fixed issue with default state of elasticsearch query that resulted in an error before the query controller could set defaults; moved defaults to the query builder and removed raw query mode as it is pretty broken, fixes #3396 2015-12-03 16:32:35 +01:00
Torkel Ödegaard
bd850d8158 Merge pull request #3410 from bergquist/ignore_npm_log
add npm-debug.log to gitignore
2015-12-03 15:56:38 +01:00
carl bergquist
d7c7f27207 add npm-debug.log to gitignore 2015-12-03 15:29:28 +01:00
Torkel Ödegaard
0fb95297a2 Merge branch 'master' of github.com:grafana/grafana 2015-12-03 15:10:23 +01:00
Torkel Ödegaard
cf1f43dc9d feat(influxdb): support for table queries, closes #3409, #3219 2015-12-03 15:09:39 +01:00
Alexey Larkov
d6935847b4 Web. Fix double slash 2015-12-03 17:36:29 +05:00
Alexey Larkov
0715ba3e5e Merge pull request #1 from grafana/master
Merge with original source
2015-12-03 17:29:00 +05:00
Torkel Ödegaard
efbbb31370 Update README.md 2015-12-03 12:03:06 +01:00
Torkel Ödegaard
8eb3e48bc7 fix(build): fixed build issues with concat not including require_config 2015-12-02 18:22:47 +01:00
Torkel Ödegaard
24324939e1 Merge branch 'fix_refresh_variable_from_url' of https://github.com/toni-moreno/grafana into toni-moreno-fix_refresh_variable_from_url 2015-12-01 15:28:48 +01:00
Torkel Ödegaard
2436cda7ac fix(graph panel): minor spelling change 2015-12-01 15:28:34 +01:00
Torkel Ödegaard
75b83af08f refactoring(ui): minor ui improvement to graph axis tab 2015-12-01 14:04:44 +01:00
Torkel Ödegaard
82d8e3c2b6 feat(graph panel): refactoring of hide zero option, #3336 2015-12-01 13:34:42 +01:00
Torkel Ödegaard
c351f46e47 Merge branch 'hide-zero' of https://github.com/utkarshcmu/grafana into utkarshcmu-hide-zero 2015-12-01 13:08:37 +01:00
Torkel Ödegaard
fb9e8d2166 feat(table panel): fixed issue with column selection for new table panel 2015-12-01 10:16:11 +01:00
Torkel Ödegaard
3668cb6dd4 fix(readme): minor readme update 2015-12-01 09:23:30 +01:00
Torkel Ödegaard
93f3e30cac fix(log): removed logging accidentally checked in 2015-12-01 09:19:22 +01:00
Torkel Ödegaard
52af346ec8 Merge pull request #3375 from hartfordfive/issue-3374
Add flag option to display app version (Issue 3374)
2015-12-01 09:18:37 +01:00
Torkel Ödegaard
8e0bba4f99 fix(templating): minor fix to default property name, fixes #3378 2015-12-01 09:01:44 +01:00
Torkel Ödegaard
4cc61d10ff fix(docs): fixed typo in installing docs fixes #3379 2015-12-01 08:59:30 +01:00
Al Lefebvre
8deb6d9246 Updated README 2015-11-30 13:09:29 -05:00
Al Lefebvre
20eb6df9e8 Removed comment line 2015-11-30 12:24:14 -05:00
Al Lefebvre
a921eeb5e2 Merge branch 'master' of github.com:hartfordfive/grafana into issue-3374 2015-11-30 12:20:47 -05:00
Al Lefebvre
f3f34e1835 Merge pull request #1 from grafana/master
Updating forked copy from official Grafana master
2015-11-30 12:20:32 -05:00
Al Lefebvre
4a29d459da Added flag to display version number and exit 2015-11-30 12:10:47 -05:00
Torkel Ödegaard
0f867a3484 Merge pull request #3230 from utkarshcmu/time-units
Added currency units, time units, tests
2015-11-30 17:29:51 +01:00
Torkel Ödegaard
6eeddaa564 Merge pull request #3369 from matschaffer/cw/alias-fix
Use of `<dimension name>` seems to confuse angular so changing to DIMENSION_NAME
2015-11-30 17:28:41 +01:00
Torkel Ödegaard
d80458ef81 Merge branch 'refresh' of https://github.com/utkarshcmu/grafana into utkarshcmu-refresh 2015-11-30 17:23:50 +01:00
Torkel Ödegaard
ad15df7222 Merge branch 'influxdb_editor_v3' 2015-11-30 16:29:20 +01:00
Torkel Ödegaard
98f7febed1 feat(influxdb): added all functions 2015-11-30 16:28:56 +01:00
Torkel Ödegaard
4f04eaec3a feat(influxdb): moved query builder tests 2015-11-30 15:27:38 +01:00
Torkel Ödegaard
5a2b9b1f44 feat(influxdb): worked on schema upgrade for new influx query editor 2015-11-30 15:09:18 +01:00
Torkel Ödegaard
721b37a08e feat(influxdb): new editor now supports field and tag lookup again 2015-11-30 10:14:42 +01:00
Torkel Ödegaard
b3d494d4c8 feat(influxdb): minor fixes to new editor 2015-11-30 08:40:11 +01:00
Mat Schaffer
85ec70e92b Use of <dimension name> seems to confuse angular so changing to DIMENSION_NAME 2015-11-30 15:55:07 +09:00
Torkel Ödegaard
e2f0ff9e88 Merge pull request #3367 from utkarshcmu/orgs
Fixed #3357
2015-11-30 07:40:07 +01:00
utkarshcmu
0dbd7d0e17 Fixed #3357 2015-11-29 03:28:07 -08:00
utkarshcmu
8cdaa044e1 Removed repeating test 2015-11-29 03:14:26 -08:00
utkarshcmu
712a420217 Fixed refresh setting for absolute time 2015-11-29 03:12:48 -08:00
Torkel Ödegaard
72d9fcdcb4 feat(influxdb): progress with new influxdb editor 2015-11-27 16:35:40 +01:00
Torkel Ödegaard
aa13a80d83 fix(influxdb): fixed issue with metric segment component that caused double events 2015-11-26 09:28:59 +01:00
Torkel Ödegaard
9e2ef543ed Merge pull request #3343 from mtanda/cloudwatch_fix_credential_check
(cloudwatch) fix EC2RoleProvider parameter
2015-11-26 08:20:50 +01:00
Mitsuhiro Tanda
2aabb387b1 fix EC2RoleProvider parameter 2015-11-26 16:01:33 +09:00
Mitsuhiro Tanda
f972863f49 add credential setting to handleDescribeInstances 2015-11-26 13:21:38 +09:00
Torkel Ödegaard
f00320c8b9 feat(influxdb): query editor is starting to work, can now add group by parts 2015-11-25 14:27:22 +01:00
utkarshcmu
1f57cf08a7 Added an option to hide zero values 2015-11-25 04:38:54 -08:00
utkarshcmu
0ff5ff5dbe Enabled refresh interval for absolute time range 2015-11-25 04:05:40 -08:00
Torkel Ödegaard
5ba19144d5 feat(influxdb): more work on query editor 2015-11-25 12:30:56 +01:00
Torkel Ödegaard
c9ba856c52 feat(influxdb): more work on influxdb editor 2015-11-25 10:22:20 +01:00
Mauro Stettler
a30ceefa6b add tests for elastic search versioning in query builder and make es version 2 default 2015-11-25 16:23:28 +09:00
Torkel Ödegaard
31e2a8b8e9 feat(influxdb): more work on new influxdb editor 2015-11-24 17:01:18 +01:00
Torkel Ödegaard
9b4150509c feat(influxdb): minor progress on new editor 2015-11-24 11:02:49 +01:00
Torkel Ödegaard
c68cd7d19a Merge branch 'master' into influxdb_editor_v3 2015-11-24 10:06:06 +01:00
Torkel Ödegaard
85382dc2e0 fix(table): fixed table height alignment 2015-11-24 10:04:01 +01:00
Torkel Ödegaard
7315a0ecec Merge pull request #3319 from VibyJocke/master
Fixed some broken HTML.
2015-11-24 09:45:13 +01:00
Torkel Ödegaard
411bd55cd4 Merge pull request #3318 from utkarshcmu/validated-responses
Validated HTTP responses
2015-11-24 09:31:23 +01:00
Joakim Lahtinen
9c0141e84e Fixed some broken HTML.
Simplified some control flow.
2015-11-24 07:29:52 +01:00
utkarshcmu
ca5d0496ee Validated HTTP responses 2015-11-23 19:19:26 -08:00
Torkel Ödegaard
24b9bc1e55 fix(missing files): added missing files, oops 2015-11-23 18:20:12 +01:00
Torkel Ödegaard
cf1e167430 feat(table panel): table panel can now show nested object data, closes #3263 2015-11-23 16:10:32 +01:00
Torkel Ödegaard
a1afd2328d fix(elasticsearch): made interval template variable appear in group by time interval dropdown, fixes #3241 2015-11-23 14:19:10 +01:00
Mauro Stettler
ada9bfcae8 keep track of elastic search version and generate query according to version 2015-11-22 21:39:56 +09:00
Torkel Ödegaard
1fa8b74595 Merge pull request #3303 from utkarshcmu/db-update
Return correct updated+created timestamps to frontend
2015-11-21 14:10:50 +01:00
utkarshcmu
5559ad1238 Return correct updated+created timestamps to frontend 2015-11-21 05:03:00 -08:00
Torkel Ödegaard
396f53d20e fix(tests): removed it.only accidentally checked in 2015-11-21 13:48:12 +01:00
Torkel Ödegaard
fc78e42a78 Merge pull request #3297 from utkarshcmu/db-update
Fixed created & updated columns update in dashboard table
2015-11-21 10:25:29 +01:00
Torkel Ödegaard
9d1906d333 fix(elasticsearch): fixed issue with disabling (hiding) query, fixes #3300 2015-11-21 10:22:59 +01:00
Torkel Ödegaard
4a3f50cef7 fix(docs): minor fix to http docs, fixes #3301 2015-11-21 10:07:33 +01:00
Torkel Ödegaard
4751e4b94e feat(elasticsearch): a lot of work to support aggregation queries without date_histogram, queries that return metric aggregations now work with the table panel (json data type), #3219 2015-11-20 16:26:44 +01:00
utkarshcmu
1b7f4f31ca Tired of gofmt 2015-11-20 04:52:50 -08:00
utkarshcmu
ca01604b43 Added statements in constructor 2015-11-20 04:37:24 -08:00
utkarshcmu
93f9a0c39c Fixed created & updated columns in dashboard table 2015-11-20 03:10:24 -08:00
Torkel Ödegaard
93977316ce revert #3288 for now since it breaks Elasticsearch 1.7 2015-11-20 10:48:10 +01:00
Torkel Ödegaard
7e49bdf5a8 revert #3288 for now since it breaks Elasticsearch 1.7 2015-11-20 10:45:48 +01:00
Torkel Ödegaard
c0beef7572 Merge pull request #3252 from alechenninger/avg-ignore-null
Ignore nulls unless 'null as zero' for series.stats.avg
2015-11-20 08:27:45 +01:00
Torkel Ödegaard
5ec448861d Merge pull request #3284 from mtanda/prometheus_null_point
(prometheus) support null point mode
2015-11-20 08:27:36 +01:00
Torkel Ödegaard
7d4326a397 Merge pull request #3292 from mtanda/cloudwatch_null_pointmode
(cloudwatch) fix null point mode
2015-11-20 08:24:51 +01:00
Torkel Ödegaard
cc43b94864 Merge pull request #3293 from jmcfarlane/prometheus-fix-step-calibration
[prometheus] Fix step calibration
2015-11-20 08:24:35 +01:00
Torkel Ödegaard
57a31828f8 fix(templating): very minor markup/css change 2015-11-20 08:18:03 +01:00
Torkel Ödegaard
0c7fccdcbe Merge pull request #2812 from utkarshcmu/master
Duplicate button for template variables
2015-11-20 08:14:20 +01:00
Mitsuhiro Tanda
a1fcd3c5b6 import fix step calibration 2015-11-20 15:37:31 +09:00
John McFarlane
c4048f8f22 [prometheus] Fix step calibration
The step interval is (correctly) being converted to seconds, but the
unit of measure suffixed onto the end is preventing the subsequent
step calibration. Because the query upstream defaults to seconds as
the unit of measure, the suffix can simply be removed and everything
works as intended.

patchset 01: Also fix the spec.
2015-11-19 21:20:07 -08:00
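The problem described in this commit can be sketched in a few lines, assuming hypothetical helper names (not the actual Grafana Prometheus datasource code): keep the step as a bare number of seconds so a later calibration pass can still adjust it numerically, instead of appending the unit suffix.

```typescript
// Minimal sketch of the issue above; calculateStep/calibrateStep are illustrative names.
// Before the fix (conceptually): returning `${intervalSeconds}s` handed a string to the
// calibration step, which could then no longer widen it numerically.
function calculateStep(intervalSeconds: number): number {
  return intervalSeconds; // keep it a plain number of seconds
}

function calibrateStep(stepSeconds: number, rangeSeconds: number, maxPoints: number): number {
  // widen the step when the range would otherwise produce too many data points
  return Math.max(stepSeconds, Math.ceil(rangeSeconds / maxPoints));
}
```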
Mitsuhiro Tanda
15dc30edf6 if there aren't enough datapoints, add null data points 2015-11-20 13:24:49 +09:00
Mitsuhiro Tanda
4254aa5f5a (cloudwatch) fix null point mode 2015-11-20 13:06:35 +09:00
Torkel Ödegaard
924ecce2c3 Merge pull request #3288 from replay/fix_time_format
specify date format in elastic search query
2015-11-19 18:53:59 +01:00
Mauro Stettler
a6e8d61e8e specify date format in elastic search query 2015-11-20 02:11:48 +09:00
Torkel Ödegaard
be5e6d55fc Merge pull request #3285 from ckrybus/patch-1
Fix recommended graphite version for better performance
2015-11-19 14:01:07 +01:00
Christoph Krybus
b0c01369c5 Fix recommended graphite version for better performance
There is no 0.9.13 graphite release, only 0.9.14.
2015-11-19 13:44:46 +01:00
Torkel Ödegaard
730d4857ba feat(elasticsearch): added caret arrow for metric / group by collapse/expand options 2015-11-19 11:04:21 +01:00
Torkel Ödegaard
0e1c12e65f Merge pull request #3279 from mtanda/override_null
override null point mode
2015-11-19 08:42:01 +01:00
Mitsuhiro Tanda
37b125ca98 override null point mode 2015-11-19 15:20:34 +09:00
Torkel Ödegaard
2e9303cb6c dependency(aws): updated aws go lib dependency 2015-11-18 17:26:35 +01:00
Torkel Ödegaard
7db38c80fc Merge branch 'aws-sdk-go' of https://github.com/mischief/grafana into mischief-aws-sdk-go 2015-11-18 17:16:42 +01:00
Torkel Ödegaard
aad824a562 feat(tablepanel): completed work on time series aggregations table transform, #3219 2015-11-18 17:05:21 +01:00
Torkel Ödegaard
f0087c93b8 fix(admin settings view): always censor provider_config for system info view, fixes #3268 2015-11-18 15:55:45 +01:00
Mitsuhiro Tanda
ae7e7e9656 remove getDimensions() 2015-11-17 22:49:46 +09:00
Torkel Ödegaard
e008473e47 Merge pull request #3249 from mtanda/cloudwatch_null_pointmode
(cloudwatch) support null point mode
2015-11-17 14:03:48 +01:00
Torkel Ödegaard
79313aa268 Merge pull request #3253 from mtanda/cloudwatch_ecs
(cloudwatch) support ECS suggestion
2015-11-17 14:00:31 +01:00
toni-moreno
1c35d4b26b removed autoupdate on variable refresh, not needed when working interactively, and fix #2722 2015-11-16 21:55:23 +01:00
Torkel Ödegaard
a56f657fb1 Merge pull request #3254 from utkarshcmu/user-id
/api/admin/users returns user ID in JSON response
2015-11-16 17:47:11 +01:00
utkarshcmu
f5db9950f8 fixed gofmt tests 2015-11-16 07:28:38 -08:00
utkarshcmu
e5931e264b Updated http_api docs 2015-11-16 06:56:46 -08:00
utkarshcmu
9485e8cfee /api/admin/users returns user ID 2015-11-16 06:55:02 -08:00
Mitsuhiro Tanda
22b139d9f8 (cloudwatch) support ECS suggestion 2015-11-16 23:21:43 +09:00
Alec Henninger
2a600b25e7 Ignore nulls unless 'null as zero' for series.stats.avg 2015-11-16 08:43:41 -05:00
Mitsuhiro Tanda
393891d6ea (cloudwatch) fill null if the datapoint is missing 2015-11-16 18:23:12 +09:00
Mitsuhiro Tanda
1bbd056797 fix templating 2015-11-16 16:51:03 +09:00
Mitsuhiro Tanda
add5bb47d5 add dimensions() to CloudWatch templating query 2015-11-16 16:50:53 +09:00
Torkel Ödegaard
305f8d6982 Merge pull request #3234 from utkarshcmu/opentsdb-ui
Made opentsdb query editor consistent and tags are editable now
2015-11-16 06:29:02 +01:00
Torkel Ödegaard
93851a9a0f feat(tablepanel): added time series aggregations transform mode, #3219 2015-11-13 17:36:11 +01:00
utkarshcmu
8a184e9d30 Made tags editable in opentsdb query 2015-11-13 04:01:55 -08:00
utkarshcmu
3a66d7c453 Made opentsdb query editor consistent 2015-11-13 03:08:48 -08:00
Torkel Ödegaard
00a479de6f feat(tablepanel): renamed some table panel schema things 2015-11-13 10:32:01 +01:00
Torkel Ödegaard
32f9f8fcce fix(http api): correct return status code for /api/datasources/:id so it returns 404 when not found, fixes #3217 2015-11-13 09:43:25 +01:00
Torkel Ödegaard
e4208441b3 fix(inspector): added close button to inspector modal, fixes #3213 2015-11-13 09:36:52 +01:00
Torkel Ödegaard
ee5ebe2e81 Merge pull request #3224 from mattttt/patch-6
Updating singlestat article to include troubleshooting
2015-11-13 09:32:42 +01:00
Torkel Ödegaard
a71b681e80 fix(docs): minor docs fix 2015-11-13 09:32:16 +01:00
Torkel Ödegaard
2cbc62d6c0 fix(elasticsearch): fixed elasticsearch issue, ghost docs series, fixes #3223 2015-11-13 09:30:28 +01:00
Torkel Ödegaard
cc125f5fd7 change(shortcuts): changed CTRL+F search shortcut to just F 2015-11-13 09:25:31 +01:00
utkarshcmu
7f9c8a1935 Added hours, days units and tests for all 2015-11-12 23:56:24 -08:00
utkarshcmu
3c54d14460 Added UI for time units, minute scalability 2015-11-12 20:26:59 -08:00
utkarshcmu
c5b39a5100 Added currency units 2015-11-12 09:56:46 -08:00
Matt Toback
7888c1c614 Update singlestat.md 2015-11-12 12:35:45 -05:00
Matt Toback
b43315c36d Update singlestat.md 2015-11-12 12:07:09 -05:00
Matt Toback
fbd6417c8a Updating singlestat article to include troubleshooting 2015-11-12 12:03:33 -05:00
Torkel Ödegaard
850ad1c0fb fix(tablepanel): fixed width fix for page2+, #3219 2015-11-12 16:30:15 +01:00
Torkel Ödegaard
dbe35bfed1 change(elasticsearch): changed min_doc_count to zero in query to get buckets for missing values, fixes #3131 2015-11-12 16:06:10 +01:00
Torkel Ödegaard
f1caae126e feat(build): fixed build issues 2015-11-12 14:28:35 +01:00
Torkel Ödegaard
bad6a40a12 updated readme 2015-11-12 13:50:57 +01:00
Torkel Ödegaard
5bc194e1f0 Merge branch 'master' of github.com:grafana/grafana 2015-11-12 13:47:25 +01:00
Torkel Ödegaard
d315ff2b96 changelog: updated master version to 2.6 and merged develop branch 2015-11-12 13:47:12 +01:00
Torkel Ödegaard
163b45cf37 feat(tablepanel): minor fix 2015-11-12 13:41:53 +01:00
Torkel Ödegaard
87c718f549 feat(tablepanel): worked on issues and improving defaults 2015-11-12 12:39:16 +01:00
Torkel Ödegaard
e04678f33c feat(dashboard): fix to issues when setting fullscreen/edit state for panels that have yet to get a scope 2015-11-12 12:01:44 +01:00
Torkel Ödegaard
bbdf75bdfa feat(dashboard): Automatically go into panel edit mode after adding a new panel to the dashboard 2015-11-12 11:49:07 +01:00
Torkel Ödegaard
5c0cf9f5d7 feat(tablepanel): worked on more display options for table panel 2015-11-12 11:36:16 +01:00
Torkel Ödegaard
8a61ec4b4e feat(tablepanel): paging is starting to work 2015-11-11 16:40:36 +01:00
Torkel Ödegaard
874a6e86fd Merge pull request #3113 from utkarshcmu/custom
Added All Value support for custom type templating
2015-11-11 12:29:04 +01:00
Mitsuhiro Tanda
ef4bec1c6d fix CloudWatch dimension value suggestion 2015-11-11 20:15:30 +09:00
Torkel Ödegaard
4da99d14e6 Merge pull request #3204 from mtanda/cloudwatch_ebs_fix
CloudWatch EBS Templating fix
2015-11-11 12:09:15 +01:00
Mitsuhiro Tanda
cc80191cd1 CloudWatch ebs templating fix 2015-11-11 19:48:10 +09:00
Torkel Ödegaard
3cdb3f0d54 Merge pull request #3196 from utkarshcmu/custom-time
Timepicker display fixed for now-*
2015-11-11 11:28:55 +01:00
utkarshcmu
9f17e4ee2c Added unit tests to verify time range fix 2015-11-11 01:34:53 -08:00
utkarshcmu
509c3dc715 Fixed time range when using NOW from and to 2015-11-11 01:34:24 -08:00
Torkel Ödegaard
4051f04e66 Merge pull request #3202 from mtanda/cloudwatch_sort
sort CloudWatch region and namespace suggestions
2015-11-11 09:42:51 +01:00
Mitsuhiro Tanda
02a37d670c sort namespaces by alphabetical order 2015-11-11 14:23:43 +09:00
Mitsuhiro Tanda
a7deca1df5 sort regions by alphabetical order 2015-11-11 14:23:38 +09:00
Torkel Ödegaard
1bec6c2aae feat(tablepanel): made annotations transform work 2015-11-10 16:15:23 +01:00
Torkel Ödegaard
a66825c71f Merge branch 'master' into develop 2015-11-10 15:26:02 +01:00
Torkel Ödegaard
0fb110a4af Merge pull request #3190 from utkarshcmu/three-hours
added now-3h option
2015-11-10 15:02:34 +01:00
utkarshcmu
167c02d773 Timepicker display fixed for now-* 2015-11-10 04:53:42 -08:00
utkarshcmu
0c50a7437b missed an S at the end 2015-11-10 02:29:02 -08:00
utkarshcmu
5339ec66b7 added now-3h option 2015-11-10 02:22:15 -08:00
Torkel Ödegaard
2371dcf694 Merge pull request #3188 from mtanda/template_error_report
Fix templating error dialog for Prometheus
2015-11-10 11:21:26 +01:00
Mitsuhiro Tanda
65bc194c42 fix templating error dialog for Prometheus 2015-11-10 19:01:37 +09:00
Torkel Ödegaard
99ee38cea3 feat(tablepanel): minor change 2015-11-10 08:38:34 +01:00
Torkel Ödegaard
5d8a51c307 Merge pull request #3129 from utkarshcmu/validity
Default AWS Region selection from dropdown
2015-11-09 22:25:30 +01:00
utkarshcmu
6325635fce Corrected the frontend filename 2015-11-09 12:31:35 -08:00
utkarshcmu
7612e47aee Select AWS region from dropdown 2015-11-09 12:08:40 -08:00
Torkel Ödegaard
d06b9401ea feat(tablepanel): added support for column sorting 2015-11-09 17:58:02 +01:00
Torkel Ödegaard
ecb1552c10 Merge pull request #3172 from utkarshcmu/aws-bug
Test specified AWS Region and not hardcoded us-east-1
2015-11-09 13:54:20 +01:00
utkarshcmu
9f066d01b1 Check specified AWS Region and not us-east-1 2015-11-09 03:35:51 -08:00
Torkel Ödegaard
673ae1edc0 Merge branch 'tablepanel2' into develop 2015-11-09 11:26:06 +01:00
Torkel Ödegaard
b25b31e4a0 Merge branch 'master' into develop 2015-11-09 11:25:22 +01:00
Torkel Ödegaard
f65fde8bb7 Merge pull request #3148 from utkarshcmu/units
Added throughput units.
2015-11-09 10:14:02 +01:00
Torkel Ödegaard
0a04b135ca Merge pull request #3158 from dthapa/master
use [auth.github] -> api_url property; supports GitHub Enterprise
2015-11-09 10:12:57 +01:00
Torkel Ödegaard
88d27fe649 log(color): disabled console log formatting by default 2015-11-09 09:48:02 +01:00
Torkel Ödegaard
5d166dc8cb feat(tablepanel): added new renderer spec 2015-11-09 09:46:49 +01:00
Torkel Ödegaard
8f81c97aaa Merge pull request #3170 from utkarshcmu/opentsdb-ctrl
Fixed opentsdb queryctrl uncheck Rate UI bug
2015-11-09 09:43:25 +01:00
Torkel Ödegaard
b5f35261a0 Merge pull request #3171 from utkarshcmu/aws-regions
Added missing AWS Regions
2015-11-09 09:41:57 +01:00
utkarshcmu
f55e24cd12 Added missing AWS Regions 2015-11-09 00:28:16 -08:00
utkarshcmu
efc0b60d41 Fixed opentsdb queryctrl uncheck Rate UI bug 2015-11-08 21:44:58 -08:00
Nick Owens
b0cb6d6d4c pkg/api/cloudwatch: fix api client construction against aws-sdk-go v0.10.2 2015-11-07 18:17:05 -08:00
Don Thapa
b345c7cf46 gofmt happiness 2015-11-07 11:32:06 -06:00
Torkel Ödegaard
56e2082205 Merge pull request #3157 from utkarshcmu/postgres
Fixed user deletion in PostgreSQL
2015-11-07 15:37:28 +01:00
utkarshcmu
fe2d8f1ea0 Used dialect for postgres 2015-11-07 05:21:22 -08:00
Don
e16bbc660d use [auth.github] -> api_url property; supports GitHub Enterprise 2015-11-07 00:06:15 -06:00
utkarshcmu
2676f24e0a Fixed user deletion in PostgreSQL 2015-11-06 20:17:27 -08:00
Torkel Ödegaard
b8e6fcfeae feat(tablepanel): worked on cell / value threshold coloring 2015-11-06 13:16:17 +01:00
utkarshcmu
b5f18561ab Added unit tests to verify units 2015-11-05 23:13:53 -08:00
utkarshcmu
22c3ec2d63 Made the units more readable 2015-11-05 22:57:05 -08:00
utkarshcmu
b678daa744 Added throughput units. 2015-11-05 22:31:33 -08:00
Torkel Ödegaard
e1433ebb41 feat(tablepanel): more refactoring 2015-11-05 12:42:47 -05:00
Torkel Ödegaard
1b83742e3e feat(tablepanel): began refactoring out table row html generation to write unit tests for it 2015-11-05 15:55:42 +01:00
Torkel Ödegaard
90cca93951 feat(tablepanel): lots of work on table panel 2015-11-05 13:13:13 +01:00
Torkel Ödegaard
4e37290a7f feat(tablepanel/elasticsearch): extended elasticsearch data source and query editor to support document queries 2015-11-05 09:56:19 +01:00
Torkel Ödegaard
7d3146ed8d feat(tablepanel): fixed header, and pagination styling 2015-11-05 08:36:51 +01:00
Torkel Ödegaard
0a1af65a4c feat(tablepanel): more column style rules 2015-11-05 07:43:06 +01:00
utkarshcmu
4de9ac133a Set default AWS region from dropdown now 2015-11-04 17:21:34 -08:00
Torkel Ödegaard
60c7bfe9a7 feat(tablepanel): work on table panel options 2015-11-04 22:44:08 +01:00
Torkel Ödegaard
7387f2e490 feat(tablepanel): fixed header, and pagination styling 2015-11-04 17:23:16 +01:00
Torkel Ödegaard
93b4f3fac8 feat(tablepanel): minor progress on table panel 2015-11-04 12:56:53 +01:00
utkarshcmu
fdeeb73587 AWS Region as a mandatory field 2015-11-04 02:27:35 -08:00
Torkel Ödegaard
6062930f9a feat(tablepanel): added more unit tests for table transforms 2015-11-04 09:41:03 +01:00
Torkel Ödegaard
867b838053 feat(tablepanel): work on table panel 2015-11-03 16:19:51 +01:00
Torkel Ödegaard
d7ee7cb88f Merge pull request #3119 from utkarshcmu/docs
Dashboard JSON Docs
2015-11-03 13:33:15 +01:00
Torkel Ödegaard
da9c792ca2 feat(tablepanel): minor progress 2015-11-03 08:18:35 +01:00
Torkel Ödegaard
8171cd51c4 feat(tablepanel): minor progress on table panel 2015-11-02 20:51:49 +01:00
Utkarsh Bhatnagar
74b10a42ee Fixed broken links in the doc page 2015-11-02 10:29:42 -08:00
Utkarsh Bhatnagar
f14ef22bb6 Fixed doc links 2015-11-02 10:25:33 -08:00
Utkarsh Bhatnagar
e8c9b0806a Added templating, timepicker, panel docs 2015-11-02 10:15:40 -08:00
Torkel Ödegaard
36c4d01ef8 feat(tablepanel): began work on new table panel 2015-11-02 17:00:47 +01:00
Torkel Ödegaard
e51d403420 rename: moved test file 2015-11-02 15:14:35 +01:00
Utkarsh Bhatnagar
f54615ed46 Included rows JSON and TODO headers 2015-11-02 00:21:59 -08:00
utkarshcmu
4c1b6f3059 Fixed a typo 2015-11-01 23:55:03 -08:00
Utkarsh Bhatnagar
ada641090f Explained basic JSON fields 2015-11-01 23:53:24 -08:00
Torkel Ödegaard
eb6c8a3521 Merge pull request #3118 from utkarshcmu/angular-native
Removed unnecessary components.
2015-11-02 08:23:02 +01:00
utkarshcmu
3a021a87a1 Added JSON of new dashboard 2015-11-01 23:17:23 -08:00
utkarshcmu
5100339604 Initialized dashboard JSON doc. 2015-11-01 22:58:14 -08:00
utkarshcmu
8448e3970b Removed unnecessary components. 2015-11-01 09:48:27 -08:00
Torkel Ödegaard
dbed679904 Merge pull request #3116 from vitaliyf/patch-1
Fixed typo in OpenTSDB's "metasync" documentation
2015-11-01 14:47:28 +01:00
Vitaliy Fuks
acb5340ffb Fixed typo in OpenTSDB's "metasync" documentation 2015-10-31 18:07:24 -04:00
Torkel Ödegaard
6a01cd56ca Merge pull request #3085 from mtanda/cloudwatch_template_doc
Add templating explanation to CloudWatch docs.
2015-10-31 11:37:01 +01:00
Torkel Ödegaard
ae38705bed Merge pull request #3112 from utkarshcmu/docs
Update opentsdb.md
2015-10-31 11:35:30 +01:00
Torkel Ödegaard
db083c43dd Merge pull request #3111 from felixbuenemann/fix-npm-3-phantomjs-build-failure
Fix npm 3 build failure in phantomjs task
2015-10-31 11:35:17 +01:00
utkarshcmu
c5435596ad Added All Value support for custom type templating 2015-10-30 22:34:40 -07:00
Utkarsh Bhatnagar
03130e1217 Update opentsdb.md
As we merged changes with auto suggestions into the master branch, update the docs accordingly.
2015-10-30 22:11:17 -07:00
Felix Bünemann
2ca6acc1e9 Fix npm 3 build failure in phantomjs task
npm v3.0+ by default dedupes node modules and stores them in a flat
tree, which means the hardcoded path to the location.js will no longer
be nested under the karma-phantomjs-launcher module.

This fixes issue #2999.
2015-10-31 05:36:35 +01:00
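A minimal sketch of the general idea behind this fix, assuming a `require.resolve`-based lookup (the exact change that was merged may differ):

```typescript
import * as path from 'path';

// With npm <= 2 the dependency tree was nested, so a hardcoded path such as
// node_modules/karma-phantomjs-launcher/node_modules/phantomjs/... (assumed layout)
// used to exist. npm >= 3 dedupes modules into a flat tree, so that nested path
// disappears. Resolving the module at runtime works with both layouts.
const phantomjsDir = path.dirname(require.resolve('phantomjs'));
console.log('phantomjs module resolved to:', phantomjsDir);
```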
Torkel Ödegaard
d8f68eb118 refactoring: moving and renaming things 2015-10-30 16:06:29 +01:00
Torkel Ödegaard
97697b93ed refactoring: moving and renaming things 2015-10-30 15:58:20 +01:00
Torkel Ödegaard
152b484eb5 refactoring: moved app/controllers -> app/core/controllers 2015-10-30 15:16:05 +01:00
Torkel Ödegaard
97de8c1cc2 refactoring: more moving stuff around 2015-10-30 15:04:27 +01:00
Torkel Ödegaard
6cf46b1635 refactoring: more moving stuff around 2015-10-30 14:44:40 +01:00
Torkel Ödegaard
1665cb4282 refactoring: moving components -> core 2015-10-30 14:24:04 +01:00
Torkel Ödegaard
1113081aab refactoring: moving components -> core 2015-10-30 14:19:02 +01:00
Torkel Ödegaard
39bc3cb532 refactoring: moving stuff around 2015-10-30 14:04:25 +01:00
Torkel Ödegaard
0a0a0776e4 Merge pull request #3088 from utkarshcmu/suggest-opentsdb
Tag suggestions fixed for v2.1.1 & above by using the Suggest API.
2015-10-30 10:34:44 +01:00
Torkel Ödegaard
59d199a148 Merge pull request #3098 from utkarshcmu/typos
Fixed some more typos in docs
2015-10-30 10:32:51 +01:00
utkarshcmu
8b9d13491f Fixed some more typos in docs 2015-10-30 02:07:08 -07:00
Torkel Ödegaard
4dcd2ceb01 fix(graph): fixed for color picker layout issue when right side legend was used, fixes #3093 2015-10-30 09:31:04 +01:00
Torkel Ödegaard
6004ba6554 Merge pull request #3095 from itsmrwave/correct_object_key_typo
Correct object key typo
2015-10-30 09:15:27 +01:00
King'ori Maina
f1847d4501 Correct object key typo
Should be ‘message’ not ‘messsage’.
2015-10-30 09:52:33 +02:00
utkarshcmu
a27186e34f Cleaned the codebase :D 2015-10-29 11:28:38 -07:00
utkarshcmu
2a8904f844 Fixed queryCtrl to use suggest API 2015-10-29 11:13:38 -07:00
Torkel Ödegaard
59fc72d37e Merge branch 'prometheus-fix_step_calculation' of https://github.com/dan-cleinmark/grafana into dan-cleinmark-prometheus-fix_step_calculation 2015-10-29 16:50:18 +01:00
Torkel Ödegaard
34e3683ded fix(cloudwatch): fixed limiting of cloudwatch period so it works for long time ranges in all cases, fixes #3086 2015-10-29 16:46:58 +01:00
Torkel Ödegaard
13760b1bdd fix(influxdb): fixed handling of relative time ranges like last x years, or last x months, fixes #3067 2015-10-29 15:53:15 +01:00
Mitsuhiro Tanda
8a252774ce Update cloudwatch.md, add explanation about templating 2015-10-29 23:51:27 +09:00
Dan Cleinmark
963f9fdf40 Ensure Promtheus step interval is always < 11000
Using a 2 week window (1209600 seconds) and a 60s step, Math.floor()
recalculates a step of 109 and results in 11097 data points in the
Prometheus query (> the 11000 max set by Prometheus). Math.ceil()
returns a step of 110 and 10996 data points.
2015-10-29 07:24:42 -07:00
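The arithmetic from the commit message, written out as a small worked example (variable names are illustrative):

```typescript
// 2-week window with Prometheus' 11000 data point limit, as described above.
const rangeSeconds = 1209600;   // 2 weeks
const maxDataPoints = 11000;

const floored = Math.floor(rangeSeconds / maxDataPoints); // 109
const ceiled = Math.ceil(rangeSeconds / maxDataPoints);   // 110

console.log(rangeSeconds / floored); // ~11097 points -> exceeds the 11000 max
console.log(rangeSeconds / ceiled);  // ~10996 points -> accepted
```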
Torkel Ödegaard
299a2457cd Merge branch 'master' of github.com:grafana/grafana 2015-10-29 14:05:19 +01:00
Torkel Ödegaard
3de4707c98 feat(elasticsearch): Annotation queries now use the daily index patterns defined in data source options; old annotations that have an index property will keep using it, so existing dashboard/annotation configs will not break, closes #3061 2015-10-29 14:05:05 +01:00
Torkel Ödegaard
603ec65e91 Merge pull request #3059 from mtanda/cloudwatch_template_fix
fix panel repeat for cloudwatch
2015-10-29 12:38:22 +01:00
Torkel Ödegaard
cdcffcd31e fix(css): restored tooltip background to dark for white theme #3079 2015-10-29 12:32:32 +01:00
Torkel Ödegaard
f8a1c7c8a1 docs(cloudwatch): updated docs with info about #3080 2015-10-29 12:22:01 +01:00
Torkel Ödegaard
5d64568f3e refactoring: some minor refactoring and changes to AWS profile PR #3053 2015-10-29 11:44:34 +01:00
Torkel Ödegaard
374fbad06d Merge branch 'master' into peekeri-support_aws_profiles 2015-10-29 11:19:15 +01:00
Torkel Ödegaard
aa31336c64 Merge pull request #3072 from utkarshcmu/tooltip
Added tooltip for cloudwatch datasource
2015-10-29 11:18:24 +01:00
Torkel Ödegaard
135ba68ff5 Merge branch 'support_aws_profiles' of https://github.com/peekeri/grafana into peekeri-support_aws_profiles 2015-10-29 11:14:04 +01:00
utkarshcmu
d3ee81bb5a Added tooltip for cloudwatch datasource 2015-10-28 10:18:44 -07:00
Torkel Ödegaard
4729bea1a2 fix(dashboard): fix for collapse row by clicking on row title, fixes #3065 2015-10-28 15:04:58 +01:00
Torkel Ödegaard
e1393f9780 changelog: updated and marked 2.5 as released 2015-10-28 14:46:47 +01:00
Torkel Ödegaard
ad7e66cdae Merge pull request #3062 from utkarshcmu/templates
Fixed typos in v2.5 doc
2015-10-28 13:00:22 +01:00
utkarshcmu
2f09ef2970 Fixed typos in v2.5 doc 2015-10-28 04:27:51 -07:00
Torkel Ödegaard
6ea2c08ecb docs(): minor docs fix 2015-10-28 11:50:22 +01:00
Mitsuhiro Tanda
b82f1edd63 fix panel repeat for cloudwatch 2015-10-28 19:36:49 +09:00
Torkel Ödegaard
1685e7cea4 docs(): update to install docs and whats new in 2.5 2015-10-28 11:08:12 +01:00
Torkel Ödegaard
287d8ca367 fix(changelog): minor spelling fix to changelog 2015-10-28 09:43:28 +01:00
Torkel Ödegaard
6397b8c1ef docs(): updated version to 2.5.0 2015-10-28 09:31:57 +01:00
Torkel Ödegaard
89eedd59a8 Merge pull request #3008 from mtanda/prometheus_link
Revert prometheus graph view link
2015-10-28 09:29:07 +01:00
Jari Sukanen
23599814a3 cloudwatch: add support for defining AWS profile for CloudWatch datasource
Add support for defining AWS profile for CloudWatch datasource to support
pulling information from multiple different AWS accounts into a single dashboard.

With this change, it is possible to define multiple AWS credentials in
~/.aws/credentials file and connect different data sources to different
AWS accounts.
2015-10-27 17:00:02 +02:00
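For reference, the multi-account setup described in this commit relies on the standard AWS shared credentials file; the profile names below are just examples, and each CloudWatch data source can then be pointed at a different profile:

```ini
# ~/.aws/credentials (standard AWS shared credentials file; profile names are examples)
[default]
aws_access_key_id = AKIA...
aws_secret_access_key = ...

[second-account]
aws_access_key_id = AKIA...
aws_secret_access_key = ...
```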
Torkel Ödegaard
89ce1a5159 fix(dashlist): minor fix to dashlist panel, and some minor html markup fixes 2015-10-27 13:17:28 +01:00
Torkel Ödegaard
22a4ef42fc version change to 2.5-pre2 2015-10-27 12:22:30 +01:00
Torkel Ödegaard
358ba395ac fix(invite): minor fix to invite partials markup 2015-10-27 12:09:14 +01:00
Torkel Ödegaard
e7d5ea8a6c fix(build): revert some build script changes to make building on go 1.4 work again 2015-10-27 12:08:56 +01:00
Torkel Ödegaard
a36711e640 fix(changelog): fixed link in changelog 2015-10-27 10:26:44 +01:00
Torkel Ödegaard
6fabff4769 Merge pull request #3049 from utkarshcmu/docs
Fixed typos in cloudwatch docs
2015-10-27 07:14:41 +01:00
ubhatnagar
6af86152e6 Removed typos in cloudwatch docs 2015-10-26 21:02:04 -07:00
Torkel Ödegaard
06d97c78c8 Merge pull request #3044 from mattttt/patch-5
Updates to timepicker docs for 2.5 release
2015-10-26 21:22:22 +01:00
Matt
da31fffb16 Update timerange.md 2015-10-26 14:05:44 -04:00
Matt
c97e3ec7ae Updates to timepicker docs for 2.5 release
Updated images to be in separate PR.
2015-10-26 14:03:26 -04:00
Torkel Ödegaard
09b3433e32 change(dashboards): made home dashboard and json file dashboards editable unless otherwise specified in json file, closes #2567 2015-10-26 18:54:32 +01:00
Torkel Ödegaard
323e84375b refactoring: minor refactoring and handling of known data source plugins 2015-10-26 16:37:45 +01:00
Torkel Ödegaard
3b67a6a222 changelog: updated changelog with details of #2928 2015-10-26 16:23:29 +01:00
Torkel Ödegaard
da3d5375b8 feat(ldap): refactoring of PR #2928, updated docs 2015-10-26 16:21:03 +01:00
Torkel Ödegaard
38bd0d1aec Merge branch 'ldap-improvements' of https://github.com/abligh/grafana into abligh-ldap-improvements 2015-10-26 15:56:21 +01:00
Torkel Ödegaard
59bd029e46 docs(cloudwatch): minor cloudwatch fix 2015-10-26 15:51:34 +01:00
Torkel Ödegaard
1dbf0ad976 docs(cloudwatch): initial cloudwatch docs, closes #2900 2015-10-26 15:44:14 +01:00
Torkel Ödegaard
3b4c095b49 Merge branch 'time-independent-prometheus-metrics' of https://github.com/arthurdandrea/grafana into arthurdandrea-time-independent-prometheus-metrics 2015-10-26 14:35:17 +01:00
Torkel Ödegaard
2e155bdeda fix(dashboard): minor function name fixes, removed insert row above/below because it did not work, #2909 2015-10-26 14:33:55 +01:00
Torkel Ödegaard
8305fd0451 Merge branch 'master' of github.com:grafana/grafana 2015-10-26 14:21:49 +01:00
Torkel Ödegaard
7477667df1 docs(elasticsearch): initial elasticsearch docs, closes #2862 2015-10-26 14:21:38 +01:00
Torkel Ödegaard
a066d7ddcb Merge pull request #2909 from utkarshcmu/title
Added move row to top and bottom and insert row capability.
2015-10-26 14:15:42 +01:00
Torkel Ödegaard
a5c742cee5 docs(elasticsearch): began work on elasticsearch docs #2862 2015-10-26 13:36:00 +01:00
Torkel Ödegaard
cac142b134 Merge pull request #2937 from mtanda/scripted_datemath
Use dateMath in ScriptedDashboard
2015-10-26 12:51:09 +01:00
Torkel Ödegaard
da7ae2b0ab fix(build/aws): updated aws dependency and fixed minor build issue, fixes #3026 2015-10-26 12:48:30 +01:00
Torkel Ödegaard
052c9aca15 Merge branch 'master' of github.com:grafana/grafana 2015-10-26 12:16:13 +01:00
Torkel Ödegaard
b0e975bfce Merge pull request #3025 from utkarshcmu/docs
Added OpenSuse documentation
2015-10-26 08:22:52 +01:00
Torkel Ödegaard
d4664507f1 Merge pull request #3029 from mlbarrow/patch-1
Fix small typo in docs for InfluxDB (influxdb.md)
2015-10-26 08:22:34 +01:00
mlbarrow
21f3f859b9 Update influxdb.md
Typo fix
2015-10-24 20:58:19 -07:00
Utkarsh Bhatnagar
54be4c5e2c Made installation doc consistent 2015-10-23 23:21:00 -07:00
ubhatnagar
45cdbe0a18 Rearranged installation docs for OpenSuse 2015-10-23 23:03:58 -07:00
ubhatnagar
6c76e9728e Added OpenSuse installation command 2015-10-23 22:53:30 -07:00
Arthur D'Andréa Alemar
0a6a3f9ab7 fix(prometheus): use time independent API to list metric and label names
Using the "/api/v1/query" endpoint to extract information about metrics
and labels is limited to the metrics available at the time parameter
(which is set to the current time); this can lead to labels not showing
because they have no value at the current time, even when the dashboard
is displaying historic data.

On the other hand "/api/v1/series" returns results including every
metric and label known to Prometheus, independent of time and value.
2015-10-23 17:18:34 -02:00
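A rough sketch of the two Prometheus HTTP API endpoints compared in this commit, building the URLs by hand and using the built-in `up` metric as an example (the actual datasource's request handling may differ):

```typescript
// Instant query: evaluated at a single point in time, so series with no value
// at that instant are missing from the result.
const queryUrl = '/api/v1/query?query=up&time=' + Math.floor(Date.now() / 1000);

// Series metadata: lists matching series independent of time and value, which
// is what makes it suitable for metric and label name lookups.
const seriesUrl = '/api/v1/series?match[]=up';

console.log(queryUrl, seriesUrl);
```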
Torkel Ödegaard
bad8ab232d Merge pull request #3020 from utkarshcmu/docs
Fixed 3014
2015-10-23 17:40:29 +02:00
Torkel Ödegaard
91a814d295 Merge pull request #3017 from volter/master
"No limit" was not effective for ES terms aggregation
2015-10-23 17:39:44 +02:00
ubhatnagar
7205cf8ce1 Fixed 3014 2015-10-23 07:29:15 -07:00
Torkel Ödegaard
6fe6a33da3 Merge pull request #3019 from sathieu/patch-2
Fix LimitNOFILE in RPM systemd unit
2015-10-23 16:17:45 +02:00
Torkel Ödegaard
9b59fc1c94 Merge pull request #3018 from sathieu/patch-1
Fix LimitNOFILE in Debian systemd unit
2015-10-23 16:17:28 +02:00
Mathieu Parent
e7834b885a Fix LimitNOFILE in RPM systemd unit 2015-10-23 15:13:41 +02:00
Mathieu Parent
4ec6691ea9 Fix LimitNOFILE in Debian systemd unit 2015-10-23 15:12:52 +02:00
Volker Fröhlich
6b9b08da30 Remove declaration of unused variable size 2015-10-23 12:00:20 +02:00
Volker Fröhlich
184307816f "No limit" was not effective for ES terms aggregation
This may belong to #2827
2015-10-23 11:40:40 +02:00
Mitsuhiro Tanda
c1d592b72c fix, call linkToPrometheus() directly 2015-10-23 09:58:42 +09:00
Torkel Ödegaard
2d23251da9 spelling: fixed spelling in influxdb annotation partial, fixes #3012 2015-10-22 18:24:43 -04:00
Torkel Ödegaard
5b01e9ec97 fix(elasticsearch): minor fix to elasticsearch unit tests so that they work in any timezone, fixes #3010 2015-10-22 16:58:31 -04:00
Torkel Ödegaard
8526230b59 fix(influxdb_08): fixed influxdb 08 query editor issue, fixes #3009 2015-10-22 16:53:34 -04:00
Torkel Ödegaard
9d04a4c4f0 Merge branch 'master' of github.com:grafana/grafana 2015-10-22 16:23:31 -04:00
Torkel Ödegaard
ae93f2b936 fix(elasticsearch): fixed proper json escaping for lucene query, fixes #2981 2015-10-22 16:23:21 -04:00
Mitsuhiro Tanda
fcaecf4782 revert prometheus link 2015-10-23 01:35:34 +09:00
Torkel Ödegaard
0b28db7fae Merge pull request #3005 from felixbarny/elastic_ds_min_interval
Ability to set a low limit for Elasticsearch date histogram interval
2015-10-22 12:35:23 -04:00
Torkel Ödegaard
87715d6231 fix(solo panel): fixed solo panel view gray bottom for rendered image and embedded iframe scenarios, fixes #3004 2015-10-22 12:33:42 -04:00
Torkel Ödegaard
3228c4f41a changelog: updated with info about new units PR #2955 2015-10-22 12:27:22 -04:00
Torkel Ödegaard
ce5b4089b5 Merge branch 'new-units' of https://github.com/counsyl/grafana into counsyl-new-units 2015-10-22 12:19:21 -04:00
Torkel Ödegaard
0903aa4596 Merge pull request #2980 from tdyas/linkSrv_fix_query_string_appending
fix appending query strings in linkSrv
2015-10-22 12:18:27 -04:00
Torkel Ödegaard
26a45c8dc0 Merge pull request #3000 from utkarshcmu/docs
Fixed docs typos
2015-10-22 12:14:06 -04:00
Torkel Ödegaard
62c908a905 fix(build): fixed partials so they are included in optimized js file, fixes #2997 2015-10-22 10:02:29 -04:00
Felix Barnsteiner
eb8c2d9053 Ability to set a low limit for Elasticsearch date histogram interval
closes #2901
2015-10-22 13:16:36 +02:00
ubhatnagar
1e2e4ba3ad Fixed other docs typos 2015-10-21 22:09:07 -07:00
ubhatnagar
aaf4b1a399 Fixed typos in guides. 2015-10-21 21:13:39 -07:00
ubhatnagar
a5e3d7a94b Fixed datasources docs typos 2015-10-21 20:58:04 -07:00
Torkel Ödegaard
58497ed596 feat(panel): performance improvement for loading panels, closes #2994 2015-10-21 11:22:53 -04:00
Torkel Ödegaard
ae81fbdffe Merge branch 'master' of github.com:grafana/grafana 2015-10-21 10:59:17 -04:00
Torkel Ödegaard
fc0705e87c fix(elasticsearch): fix for daily pattern when getting index for today, is now using utc, fixes #2913 2015-10-21 10:59:02 -04:00
Torkel Ödegaard
c489d806dc Merge pull request #2995 from peekeri/fix_influxdb_annotation_query
influxdb: fix influxdb annotation query
2015-10-21 10:34:29 -04:00
Jari Sukanen
f3ecdc5af4 influxdb: fix influxdb annotation query 2015-10-21 14:31:06 +03:00
Torkel Ödegaard
6fecb4bf3e fix(http route): fixed dashboard-solo route to not return 404, fixes #2979 2015-10-20 10:02:56 -04:00
Tom Dyas
867ac5df67 fix appending query strings in linkSrv
When linkSrv appends parameters to a URL's query string, it would blindly
add a ? to the URL even if the URL already contained a ? or the string
to add was empty. This change fixes that behavior and some other edge cases.

Includes a unit test to verify expected behavior.
2015-10-19 13:11:34 -04:00
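A minimal sketch of the behavior this commit describes (an illustrative helper, not the actual linkSrv code):

```typescript
// Append query parameters without blindly adding '?':
// - nothing is appended when there is nothing to add,
// - '&' is used when the URL already contains a query string.
function appendQueryString(url: string, queryString: string): string {
  if (!queryString) {
    return url;
  }
  const separator = url.indexOf('?') !== -1 ? '&' : '?';
  return url + separator + queryString;
}

// appendQueryString('/dashboard/db/test', '')              -> '/dashboard/db/test'
// appendQueryString('/dashboard/db/test', 'var-a=1')       -> '/dashboard/db/test?var-a=1'
// appendQueryString('/dashboard/db/test?from=now', 'a=1')  -> '/dashboard/db/test?from=now&a=1'
```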
Torkel Ödegaard
1ff3c3be84 Merge pull request #2952 from Krinkle/fixup
docs(): Fix changelog link in whats-new-in-v2-1
2015-10-17 12:44:09 -04:00
Torkel Ödegaard
1cb9de07a6 Merge pull request #2962 from damm/docs_spellcheck
I noticed a typo when I was reviewing the docs ...
2015-10-17 12:41:25 -04:00
Torkel Ödegaard
43ca50ebbe fix(build): another minor build script fix 2015-10-16 13:03:20 -04:00
Torkel Ödegaard
b70b730cb9 fix(build): minor fix for build script to make latest copy for rpm when version is pre release version 2015-10-16 12:26:30 -04:00
Torkel Ödegaard
dbc1a9cf82 fix(influxdb_0.8.x): fixed issue with new timepicker ranges like The day so far, fixes #2936 2015-10-16 12:07:35 -04:00
Torkel Ödegaard
c95a991cb3 fix(panel/common): fix for query letters when importing old dashboards, fixes #2943 2015-10-16 11:58:44 -04:00
Torkel Ödegaard
c320e9d583 fix(annotations): fixed graphite annotations, broken by recent time handling changes, fixes #2947 2015-10-16 11:07:15 -04:00
Torkel Ödegaard
e507afc3d5 fix(panel): fix for firefox and placing cursor in text inputs when in panel fullscreen edit mode, fixes #2957 2015-10-16 10:10:39 -04:00
Scott M. Likens
bd77fd92bb I'm not sure what a dashboard is ... 2015-10-15 19:45:40 -07:00
Greg Look
882a988143 Add currency units from #1910. 2015-10-15 12:48:48 -07:00
Greg Look
0b3e33e226 Shorten percent unit labels. 2015-10-14 15:40:04 -07:00
Greg Look
524f5d45ec Add test for unit menu structure. 2015-10-14 14:51:42 -07:00
Greg Look
85887ad1cf Add mile, fix menu values. 2015-10-14 14:51:33 -07:00
Greg Look
d94b6635af Add temperature and pressure units, split submenus. 2015-10-14 14:50:45 -07:00
Greg Look
3c7a483f5c Add length and volume units. 2015-10-14 14:14:25 -07:00
Greg Look
dc5c3a3939 Implement decibel and percentunit units.
Add tests to exercise new units as well.
2015-10-14 14:02:29 -07:00
ubhatnagar
9185c94a2d Added insert row option in the row menu. 2015-10-14 13:45:47 -07:00
Greg Look
70269c8196 Reformat unit menu definition. 2015-10-14 13:34:48 -07:00
Greg Look
51a589f5a6 Change formatFuncCreator to scaledUnits builder.
Abstract out both decimal and binary SI prefixes into builders using
scaledUnits.
2015-10-14 13:18:02 -07:00
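A rough sketch of the builder idea referred to above, assuming nothing about the real kbn.js signatures: one factory takes the scaling factor (1000 for decimal SI prefixes, 1024 for binary prefixes) plus a list of suffixes and returns a formatting function.

```typescript
// Illustrative scaledUnits-style builder (not the actual kbn.js implementation).
function scaledUnits(factor: number, suffixes: string[]) {
  return (value: number, decimals: number = 2): string => {
    let index = 0;
    while (Math.abs(value) >= factor && index < suffixes.length - 1) {
      value /= factor;
      index++;
    }
    return value.toFixed(decimals) + ' ' + suffixes[index];
  };
}

const formatDecimalBytes = scaledUnits(1000, ['B', 'kB', 'MB', 'GB', 'TB']);
const formatBinaryBytes = scaledUnits(1024, ['B', 'KiB', 'MiB', 'GiB', 'TiB']);
// formatDecimalBytes(1500000) -> "1.50 MB"
// formatBinaryBytes(1048576)  -> "1.00 MiB"
```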
Greg Look
7d24c5fda2 Add fixedUnit format builder. 2015-10-14 13:07:34 -07:00
Greg Look
0bc85d27f8 Group rounding and fixed number functions. 2015-10-14 12:56:31 -07:00
Greg Look
43c2ca2d7d Group value formats by type. 2015-10-14 12:51:59 -07:00
Greg Look
e3e21a251f Group helper functions in kbn.js. 2015-10-14 12:41:20 -07:00
Timo Tijhof
267417d6a8 docs(): Fix changelog link in whats-new-in-v2-1
The quotes turn the value into a title attribute rather than href attribute,
thus on http://docs.grafana.org/guides/whats-new-in-v2-1/ this link was
rendered as <a href title="https://github.com/...">CHANGELOG.md</a>
which when clicked goes back to itself (not to GitHub).
2015-10-14 14:00:25 -04:00
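In markdown terms, the broken and fixed link forms look roughly like this (the full URL is abbreviated, as in the commit message):

```markdown
<!-- Broken: the quoted value is parsed as a title, leaving the href empty -->
[CHANGELOG.md]("https://github.com/...")

<!-- Fixed: an unquoted URL becomes the href -->
[CHANGELOG.md](https://github.com/...)
```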
Alex Bligh
e8256f0ad7 Add support for POSIX LDAP schema
In the POSIX LDAP schema, there is no 'memberOf' attribute returned
in relation to which groups a person is a member of. Rather, it is
necessary to query the group objects which have the people as members.
This commit adds an additional filter, which if specified explicitly
searches for groups, rather than relying on the 'memberOf' attribute.
This enables Grafana to work with LDAP POSIX schema (e.g. OpenLDAP
etc.)

Signed-off-by: Alex Bligh <alex@alex.org.uk>
2015-10-13 19:51:59 +01:00
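As a hedged illustration of the group-side lookup this commit describes (generic LDAP, not a specific Grafana configuration key): instead of reading `memberOf` from the person entry, the directory is searched for group objects that list the user as a member, for example with a filter along these lines, where the user's login is substituted for the placeholder:

```
(&(objectClass=posixGroup)(memberUid=<login>))
```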
Alex Bligh
458e6da700 Allow user specified CA certs
Signed-off-by: Alex Bligh <alex@alex.org.uk>
2015-10-13 19:47:24 +01:00
Alex Bligh
a906fa178a Support multiple space-separated LDAP hosts
Signed-off-by: Alex Bligh <alex@alex.org.uk>
2015-10-13 19:46:53 +01:00
Mitsuhiro Tanda
4588c5a19a pass dateMath to ScriptedDashboard script 2015-10-13 12:29:42 +09:00
Torkel Ödegaard
e873574e8c fix(logging): fixed so that router_logging = true actually logs all http requests, fixes #2902 2015-10-10 17:55:15 -04:00
Torkel Ödegaard
d09bff9039 fixed failing jshint 2015-10-10 15:11:58 -04:00
Torkel Ödegaard
c0da52aac8 fix(share): fixed share panel image url, did not generate correct url when domain name contained word dashboards, fixes #2916 2015-10-10 14:38:22 -04:00
Torkel Ödegaard
c831369974 fix(influxdb): influxdb data source did not use right http abstraction for metric queries, fixes #2919 2015-10-10 11:46:00 -04:00
Torkel Ödegaard
f0a13b2a0d Merge pull request #2914 from anryko/master
Fixed configuration example for Github OAuth team_ids.
2015-10-10 16:26:28 +02:00
anryko
b68987dcde Fixed configuration example for Github OAuth team_ids. 2015-10-09 17:05:46 +02:00
Torkel Ödegaard
c1f77eeaea Merge branch 'master' of github.com:grafana/grafana 2015-10-09 08:28:48 +02:00
Torkel Ödegaard
9b5a0a54cf minor docs fix 2015-10-09 08:28:33 +02:00
ubhatnagar
14f3a68215 Added move row to top and bottom. 2015-10-09 03:06:52 +05:30
Torkel Ödegaard
bf25b9f443 Merge pull request #2896 from garymcleanhall/fix-grafana-on-microsoft-edge
Removed spurious `</div>`, fixes Edge on Windows
2015-10-08 21:29:23 +02:00
Torkel Ödegaard
3435aaea45 fixed failing unit test 2015-10-08 17:39:06 +02:00
Torkel Ödegaard
9fc91b7aa1 fixed gofmt issue 2015-10-08 17:30:13 +02:00
Torkel Ödegaard
04eefb8480 fix(timepicker): fixed issue with timepicker and auto refresh and entering manual time when timepicker dropdown is open, fixes #2897 2015-10-08 15:35:17 +02:00
Torkel Ödegaard
8789be7671 Merge branch 'master' of github.com:grafana/grafana 2015-10-08 13:30:11 +02:00
Torkel Ödegaard
fe46410c31 Merge branch 'cloudwatch' 2015-10-08 13:09:27 +02:00
Torkel Ödegaard
aef644bd21 feat(cloudwatch): final polish to the cloudwatch editor, closes #684 2015-10-08 13:09:15 +02:00
Torkel Ödegaard
df4d5ea8a6 Merge pull request #2898 from zachm/ztm_patch_missing_downsample
Fix missing downsampling name in kairos plugin
2015-10-08 12:05:20 +02:00
Torkel Ödegaard
c34c3ac845 feat(cloudwatch): refactoring and editor improvements, #684 2015-10-08 11:37:47 +02:00
Zachary Musgrave
a94720878b Fix missing downsampling name in kairos plugin 2015-10-07 16:43:40 -07:00
Gary McLean Hall
59a2042a31 Removed spurious </div>, fixes Edge on Windows 2015-10-08 00:14:00 +01:00
Torkel Ödegaard
f680c15283 fix(snapshot): fixed issue with timepicker / time display when viewing snapshot, fixes #2892 2015-10-07 13:07:30 +02:00
Torkel Ödegaard
f11785001c docs: minor fix 2015-10-06 17:51:22 +02:00
Torkel Ödegaard
106fe4854f Merge branch 'prometheus' of github.com:grafana/grafana
Conflicts:
	pkg/models/datasource.go
	public/app/plugins/datasource/prometheus/datasource.js
	public/app/plugins/datasource/prometheus/partials/query.editor.html
	public/app/plugins/datasource/prometheus/queryCtrl.js
2015-10-06 16:36:58 +02:00
Torkel Ödegaard
44d377b810 feat(prometheus): refactoring and polish of the prometheus editor, removing unused/unneeded code 2015-10-06 14:34:44 +02:00
Torkel Ödegaard
a2bf3e056d Merge branch 'prometheus' 2015-10-06 14:10:06 +02:00
Torkel Ödegaard
2d722e26de fix(cloudwatch): fixed test datasource, broken due to some function name changes 2015-10-06 14:08:48 +02:00
Torkel Ödegaard
182e079d25 Merge branch 'master' into cloudwatch 2015-10-06 14:08:10 +02:00
Torkel Ödegaard
318af9b418 small docs structure change, moved build from source into the install section, and CLA and about into the project section 2015-10-06 10:38:19 +02:00
Torkel Ödegaard
0bcda4a2eb minor elasticsearch fix 2015-10-05 11:17:53 +02:00
Ivan Babrou
0dc0e03c4c Add suggest_tagk() and suggest_tagv() to OpenTSDB datasource (#2840) 2015-10-04 12:54:43 +01:00
Torkel Ödegaard
0db2d07a1a Merge pull request #2871 from mtanda/prometheus_time_fix
fix prometheus time conversion
2015-10-03 12:50:05 +02:00
Torkel Ödegaard
3d52095765 feat(cloudwatch): restored dimension keys lookup 2015-10-02 20:25:28 +02:00
Torkel Ödegaard
0912cec0e5 feat(cloudwatch): continuing work on moving hard coded stuff to backend and cleaning up code, #684 2015-10-02 11:54:35 +02:00
Torkel Ödegaard
180ba33ac8 feat(cloudwatch): refactoring and cleanup of backend code, started moving hard coded stuff in the frontend to the backend, changed name of metricFind queries region() -> regions() , and namespace() -> namespaces() to be more consistent with the others, #684 2015-10-02 11:10:21 +02:00
Torkel Ödegaard
04f4454974 feat(cloudwatch): lots of code refactoring and cleanup, #684, dimension values lookup works but seems to return all dimension values no matter what dimension key you select, removed strange formatting of template dimension values query, it should not return a key/value pair but only the dimension value 2015-10-02 09:04:46 +02:00
Torkel Ödegaard
91285baea5 feat(cloudwatch): fixed failing unit tests 2015-10-02 07:32:21 +02:00
Torkel Ödegaard
875d80aa72 feat(cloudwatch): refactoring cloudwatch datasource code, #684 2015-10-02 07:25:54 +02:00
Mitsuhiro Tanda
5e19fdb492 fix prometheus time conversion 2015-10-02 12:10:11 +09:00
Torkel Ödegaard
bddcc6491a Merge pull request #2870 from ctdk/strip-format
Add config option to strip (most) colors from console logs
2015-10-01 22:31:21 +02:00
ctdk
d37e18fdcf Add config option to strip (most) colors from console logs 2015-10-01 13:16:23 -07:00
Torkel Ödegaard
af8d12124a feat(cloudwatch): code refactoring and cleanup, trying to rewrite so the code uses promises instead of callbacks, #684 2015-10-01 21:20:52 +02:00
Torkel Ödegaard
5e34823e7c Merge pull request #2858 from mtanda/prometheus
fix unmatched tag
2015-10-01 18:47:21 +02:00
Torkel Ödegaard
f44eaae88a Merge pull request #2867 from juliusv/fix-prometheus-link
Fix "Link to Prometheus" button for proxied Prometheus sources.
2015-10-01 18:43:19 +02:00
Julius Volz
3cc69112c1 Fix "Link to Prometheus" button for proxied Prometheus sources. 2015-10-01 18:23:29 +02:00
Torkel Ödegaard
205d4232b9 feat(cloudwatch): only support proxy mode, can remove frontend aws-sdk lib 2015-10-01 17:38:55 +02:00
Torkel Ödegaard
3c6a06a327 feat(cloudwatch): moved specs into plugins dir 2015-10-01 17:00:41 +02:00
Torkel Ödegaard
057b533a39 Merge branch 'master' into cloudwatch 2015-10-01 16:46:59 +02:00
Torkel Ödegaard
b774b91b92 Merge branch 'cloudwatch' of https://github.com/mtanda/grafana into cloudwatch
Conflicts:
	public/test/specs/cloudwatch-datasource-specs.js
2015-10-01 16:41:54 +02:00
Torkel Ödegaard
7dc923a292 fix(timepicker): fixed displaying of custom time ranges, #2861 2015-10-01 16:36:38 +02:00
Torkel Ödegaard
ef2094f817 feat(influxdb): minor progress on #2802 2015-10-01 15:48:45 +02:00
Torkel Ödegaard
f0f791d226 Merge pull request #2859 from tmonk42/graphite_multiplySeries
add graphite multiplySeries function
2015-10-01 08:39:22 +02:00
Haneysmith, Nathan
7ca261e33e add graphite multiplySeries function 2015-09-30 11:18:47 -07:00
Torkel Ödegaard
228aac2d48 Merge pull request #2857 from jimmidyson/prometheus-datasource
Prometheus fixes
2015-09-30 18:33:55 +02:00
Mitsuhiro Tanda
d0b744387b fix unmatched tag 2015-10-01 01:33:13 +09:00
Jimmi Dyson
7cadb9012a Switch to png image 2015-09-30 16:45:38 +01:00
Jimmi Dyson
0c222c4e8c Fix Prometheus test connection 2015-09-30 16:44:24 +01:00
Torkel Ödegaard
83052352dc feat(influxdb editor): lots of work on new editor, #2856 2015-09-30 17:16:34 +02:00
Torkel Ödegaard
73516c0c97 Merge pull request #2856 from jimmidyson/prometheus-datasource
Further Prometheus unit tests
2015-09-30 16:59:45 +02:00
Jimmi Dyson
b90e4057ba Convert prometheus specs to typescript 2015-09-30 15:52:15 +01:00
Jimmi Dyson
67f253830f Add Prometheus metricFindQuery unit tests 2015-09-30 14:20:58 +01:00
Jimmi Dyson
59dbe45784 Fix typo in docs 2015-09-30 14:20:58 +01:00
Torkel Ödegaard
f053b41645 feat(influxdb editor): more progress 2015-09-30 14:37:27 +02:00
Torkel Ödegaard
84615ad299 Merge pull request #2855 from jimmidyson/prometheus-datasource
Prometheus docs & better template queries
2015-09-30 13:50:11 +02:00
Jimmi Dyson
2e291d73aa jshint fixes 2015-09-30 12:46:44 +01:00
Jimmi Dyson
055efa3904 Add prometheus docs 2015-09-30 12:42:48 +01:00
Jimmi Dyson
daee7970f3 Add label_values query to get labels on a particular metric 2015-09-30 12:29:53 +01:00
Torkel Ödegaard
2dc8fcd3be poc(influxdb v3 editor): more testing of new influxdb editor approach 2015-09-29 21:32:15 +02:00
Torkel Ödegaard
2f55c18d56 Merge pull request #2847 from jimmidyson/prometheus-datasource
Add Prometheus datasource
2015-09-29 18:23:12 +02:00
Jimmi Dyson
cf0748895e Prometheus template params fixes 2015-09-29 17:07:11 +01:00
Jimmi Dyson
6e66b8a0fa Add prometheus datasource 2015-09-29 17:07:11 +01:00
Torkel Ödegaard
e70b99a706 poc: influxdb editor v3 test 2015-09-29 18:00:15 +02:00
Torkel Ödegaard
42e7a70d99 Merge pull request #2845 from sbouchex/master
Missing carriage return in init script when displaying usage
2015-09-29 16:43:52 +02:00
Torkel Ödegaard
ee85eb2779 Updated prometheus config 2015-09-29 16:22:41 +02:00
sbouchex
6c1adf9187 Missing carriage return when displaying usage 2015-09-29 16:06:40 +02:00
Torkel Ödegaard
81a660eae4 fix(phantomjs binary): fixed PR #2832 so that it works on linux, now phantomjs based server side render should work on mac and linux, maybe even windows 2015-09-29 15:36:13 +02:00
Torkel Ödegaard
8b029388a5 Merge branch 'phantomjs' of https://github.com/fg2it/grafana into fg2it-phantomjs 2015-09-29 15:22:37 +02:00
Torkel Ödegaard
c816ed2527 feat(usage stats): added data source count stats 2015-09-29 13:47:56 +02:00
Torkel Ödegaard
866ea7f942 added prometheus docker block 2015-09-29 08:16:26 +02:00
Torkel Ödegaard
d9d3da4def Merge branch 'prometheus-datasource' of https://github.com/jimmidyson/grafana into prometheus 2015-09-29 08:06:16 +02:00
Mitsuhiro Tanda
be6cb24e10 fix cloudwatch test 2015-09-29 13:26:04 +09:00
Mitsuhiro Tanda
1a63d9eb3b reactivate cloudwatch test 2015-09-29 12:37:34 +09:00
Jimmi Dyson
bf98cfeadc Add prometheus datasource 2015-09-28 23:15:33 +01:00
Torkel Ödegaard
8a39b32b5c refactor: moved elasticsearch specs to plugin folder and to typescript 2015-09-28 16:28:19 +02:00
Torkel Ödegaard
7d2646f60a refactor: moved influxdb specs to plugins folder 2015-09-28 15:51:02 +02:00
Torkel Ödegaard
1a8bc1dd60 fix(dashboard save as): fixed wonky behavior in save as modal, cursor jumping to the end randomly, fixes #2837 2015-09-28 15:26:56 +02:00
Torkel Ödegaard
7e434fc019 refactor: moved graphite specs into plugins directory 2015-09-28 15:23:53 +02:00
Torkel Ödegaard
49d57cf596 feat(cloudwatch): uncommented tests, but they do not seem to execute 2015-09-28 14:16:03 +02:00
Torkel Ödegaard
8dfbc55851 Merge branch 'cloudwatch' of https://github.com/mtanda/grafana into cloudwatch 2015-09-28 13:44:30 +02:00
Torkel Ödegaard
3cbfe21b2c Merge branch 'master' into cloudwatch 2015-09-28 13:43:03 +02:00
Torkel Ödegaard
cb7424ce5e fix(playlist ui): minor polish / fix to playlist ui, fixes #2831, other minor css / markup fixes 2015-09-28 11:53:49 +02:00
Torkel Ödegaard
a567939f78 fix(elasticsearch): quote variable value in lucene variable multi format, fixes #2828 2015-09-28 11:28:08 +02:00
Torkel Ödegaard
a3748d4b97 fix(singlestat): fixed missing sparklines, caused by recent changes to time range handling, fixes #2815 2015-09-28 09:28:43 +09:00
Torkel Ödegaard
b80dc92ca5 Merge branch 'master' of github.com:grafana/grafana 2015-09-27 12:09:45 +02:00
Torkel Ödegaard
57dee76c88 feat(elasticsearch): templating terms query should have size set to zero to return all terms, fixes #2827 2015-09-27 12:09:12 +02:00
fg2it
2513639499 fix for relative path 2015-09-26 11:38:37 +02:00
Mitsuhiro Tanda
9ae6ac25f5 refactor dataproxy_cloudwatch 2015-09-26 02:33:53 +09:00
Mitsuhiro Tanda
01ec8d0bcb fix cloudwatch time error 2015-09-26 02:33:52 +09:00
Mitsuhiro Tanda
d09e8a12b4 fix jshint error 2015-09-26 02:33:52 +09:00
Mitsuhiro Tanda
5dd64b97d2 refactor 2015-09-26 02:33:52 +09:00
Mitsuhiro Tanda
9600b1f103 add ebs_volume_ids() for templating 2015-09-26 02:33:51 +09:00
Mitsuhiro Tanda
ca9861e749 fix cloudwatch config editor 2015-09-26 02:33:51 +09:00
Torkel Ödegaard
38bf07f4da Merge pull request #2817 from utkarshcmu/axes
Single stat panel throws warning on multiple series result.
2015-09-25 10:23:10 +02:00
fg2it
189796891e adding phantomjs task to default and build 2015-09-25 01:19:49 +02:00
fg2it
8134905a86 new grunt task for setting phantomjs binary 2015-09-25 00:56:56 +02:00
fg2it
45fb760a35 removing default phantomjs binary 2015-09-25 00:51:54 +02:00
ubhatnagar
026fffa19f Singlestat Panel Error in InspectCtrl. 2015-09-24 23:13:05 +05:30
Torkel Ödegaard
9da5ef3cbf fix(singlestat): fixed missing sparklines, caused by recent changes to time range handling, fixes #2815 2015-09-24 11:36:25 +02:00
ubhatnagar
ea7fe0c761 Single stat panel throws warning on multiple series result. 2015-09-24 09:40:09 +05:30
Torkel Ödegaard
25d7b8d08d feat(cloudwatch): resumed work on cloudwatch datasource, #684, #2445 2015-09-23 21:13:19 +02:00
Torkel Ödegaard
f467cb8cd2 Merge branch 'master' of github.com:grafana/grafana 2015-09-23 20:57:50 +02:00
Torkel Ödegaard
f4e3c0a2e4 Revert "poc: some tests for new influxdb editor"
This reverts commit 075d01820c.
2015-09-23 20:57:29 +02:00
ubhatnagar
9cdf0601eb Removed unnecessary statement. 2015-09-23 22:20:38 +05:30
ubhatnagar
28ef972c9f Added duplicate feature for variable. 2015-09-23 22:13:38 +05:30
Torkel Ödegaard
c66476f6b6 Merge pull request #2810 from tuxinaut/master
Fix changelog formatting
2015-09-23 11:42:29 +02:00
Denny Schäfer
3f90220208 Fix changelog formatting 2015-09-23 11:35:20 +02:00
Torkel Ödegaard
81ebaae12e feat(opentsdb): templating, added default match format option, now new variables with opentsdb datasource will automatically use the new pipe format, #2808 2015-09-23 11:18:56 +02:00
Torkel Ödegaard
71984b25e9 Merge pull request #2808 from utkarshcmu/master
Implemented Opentsdb MultiSelect Templating.
2015-09-23 11:16:01 +02:00
Torkel Ödegaard
4100c9881a fix(panel): fixed selecting text in fullscreen edit mode 2015-09-23 10:03:30 +02:00
Torkel Ödegaard
075d01820c poc: some tests for new influxdb editor 2015-09-23 09:58:36 +02:00
ubhatnagar
6f43cbf665 Added unit tests for all and multi format options. 2015-09-23 13:24:59 +05:30
ubhatnagar
866f48f92d Added pipe in All Format list. 2015-09-23 13:12:35 +05:30
Torkel Ödegaard
2790e4e819 change(influxdb): removed derivative functions from aggregator list 2015-09-23 09:40:53 +02:00
Torkel Ödegaard
8bb2b5e290 fix(influxdb): have alias field visible when using raw query mode, fixes #2803 2015-09-23 09:34:05 +02:00
Torkel Ödegaard
63290d0f5d changelog: added OpenTSDB enhancement (fetch aggregators from OpenTSDB) to the changelog, #1646 2015-09-23 09:19:21 +02:00
Torkel Ödegaard
5c55617585 refactor: polishing OpenTSDB related PR #1646, added caching of aggregators request so only one call is made 2015-09-23 09:17:37 +02:00
Torkel Ödegaard
0e0caabf7d Merge branch 'master' of https://github.com/mxk1235/grafana into mxk1235-master 2015-09-23 09:08:29 +02:00
ubhatnagar
024a319512 Implemented Opentsdb MultiSelect Templating. 2015-09-23 11:22:57 +05:30
Torkel Ödegaard
f632b3b029 feat(elasticsearch): added new templating all format and multi format, also added automatic setting of correct all and multi format depending on data source, closes #2696 2015-09-22 14:29:41 +02:00
Torkel Ödegaard
b37f9a7db0 fix(graphite): minor fix to query editor when using summarize function with no metric segments, only series ref, fixes #2788 2015-09-22 13:32:28 +02:00
Torkel Ödegaard
6f9c306260 fix(singlestat): fixed usage of template variable in drilldown link for singlestat, fixes #2787 2015-09-22 12:48:03 +02:00
Torkel Ödegaard
c369350ca7 Merge branch 'master' of github.com:grafana/grafana 2015-09-22 09:32:13 +02:00
Torkel Ödegaard
bc3c394210 feat(elasticsearch): worked on elasticsearch templating support, #2696 2015-09-22 09:31:58 +02:00
Torkel Ödegaard
e9df31b650 Merge pull request #2799 from utkarshcmu/master
Fixed #2798. Removed unused components.
2015-09-22 08:50:00 +02:00
ubhatnagar
e49bb1ccc0 Fixed #2798. Removed unused components. 2015-09-22 12:16:10 +05:30
Torkel Ödegaard
0ef8e086a2 fix(share modal): fixed issue with share modal introduced with recent change to time handling, #2791 2015-09-22 07:32:47 +02:00
Torkel Ödegaard
63665dccae fix(grafana datasource): fixed the built in test data source, fixes #2795 2015-09-22 07:12:36 +02:00
Torkel Ödegaard
792b194d0e feat(elasticsearch): finished work on adding support for filters aggregate, you can now split series by query using group by filters, closes #2785 2015-09-21 20:29:05 +02:00
Torkel Ödegaard
e694a74c9d feat(elasticsearch): work on supporting filters aggregate, #2785 2015-09-21 19:23:18 +02:00
Torkel Ödegaard
9de016bfe3 feat(elasticsearch): alias and lucene query fields are now visible/usable when using raw json query, #1034 2015-09-21 12:07:03 +02:00
Torkel Ödegaard
3eddfc028e Merge branch 'patch-2' of https://github.com/cagdascirit/grafana 2015-09-21 11:32:36 +02:00
Torkel Ödegaard
2125648798 Merge branch 'master' of https://github.com/tuxinaut/grafana 2015-09-21 11:31:25 +02:00
Torkel Ödegaard
24bff6e04d ui(dashboard cog icon): minor change to PR #2772 that adds tooltip to cog icon 2015-09-21 11:29:14 +02:00
Torkel Ödegaard
cdab0d208e Merge branch 'master' of https://github.com/utkarshcmu/grafana into utkarshcmu-master 2015-09-21 11:16:50 +02:00
Torkel Ödegaard
20f04ab352 fix(build): fixed requirejs optimized build 2015-09-21 09:36:17 +02:00
Torkel Ödegaard
7f1af24318 fix(ldap): fixed ldap org roles sync, it only added one new role per login, now all roles are added, fixes #2766 2015-09-21 09:19:50 +02:00
Torkel Ödegaard
8d87db58c6 docs(): added link to external install tutorial / article 2015-09-21 09:04:59 +02:00
Torkel Ödegaard
feae4c6c8b fix(ldap): fixed syncing of email and name from ldap, fixes #2765 2015-09-21 09:02:52 +02:00
ubhatnagar
00c89b8354 Removed .jshintrc statement, implemented manage dashboard tooltip. 2015-09-21 08:09:34 +05:30
Torkel Ödegaard
fb767f5680 change: removed drilldown links from extended panel menu 2015-09-19 15:59:29 +02:00
Torkel Ödegaard
c7d22aafd2 feat(drilldown link): better access to drilldown links directly by clicking the external link icon in panel header, fixes #1575 2015-09-19 15:53:48 +02:00
Torkel Ödegaard
b5f237a69b fix(graph): minor fix for hover tooltip when combined with a single series using stepped lines, fixes #2754 2015-09-19 15:20:53 +02:00
Torkel Ödegaard
b4093ccf59 fix(graphite): minor fix to editor, the add function dropdown extended above the page, fixes #1152 2015-09-19 14:58:52 +02:00
Torkel Ödegaard
a23217cc6f fix(influxdb): clear existing Authorization header when proxying request to InfluxDB, fixes #2778 2015-09-19 12:32:35 +02:00
Torkel Ödegaard
f4f7f47901 changelog: updated changelog with info about timepicker feature 2015-09-19 12:26:21 +02:00
Torkel Ödegaard
2f68687de9 feat(timepicker2): temporarily removed the option to define custom quick ranges, will have to be part of a future issue, closes #2761 2015-09-19 12:20:24 +02:00
Torkel Ödegaard
86f4907cc4 feat(panel fullscreen): completely changed how the fullscreen view/edit works, no longer uses css fixed position, yay, closes #2779 2015-09-19 11:40:51 +02:00
Torkel Ödegaard
bffb795d7a feat(timepicker): small style change for timepicker 2015-09-19 10:54:17 +02:00
Torkel Ödegaard
c21cffa6d4 fix(timepicker): UTC now works in all scenarios I can think of, manual edit, date picker edit, #2761 2015-09-18 21:01:13 +02:00
Torkel Ödegaard
96b0e70ddd feat(timepickerv2): fixed lots of minor issues and updated kairosdb and opentsdb data sources to work with the new date formats 2015-09-18 15:06:08 +02:00
Torkel Ödegaard
febe56b062 feat(timepicker): fixed zoomout 2015-09-18 14:04:53 +02:00
Torkel Ödegaard
f5e6722826 feat(timepickerv2): more work on new timepicker, now absolute time mixed with relative time works, yesterday, this day last week, etc now work 2015-09-18 13:54:31 +02:00
Torkel Ödegaard
cea13b1823 feat(timepicker2): moved to controllerAs and bindToController for timepicker component 2015-09-18 12:41:32 +02:00
Torkel Ödegaard
3d85e85f29 fix(events): fixed handling of onAppEvents when used from rootScope, must supply local scope, can now be used in isolate scope scenarios 2015-09-18 12:15:06 +02:00
ubhatnagar
923df8244b Search Dashboard Panel will hide if 'Manage Dashboards' is clicked. 2015-09-18 02:30:02 -07:00
Torkel Ödegaard
69db9e0d45 feat(timepickerV2): absolute time / calendar picker works, #2761 2015-09-18 11:01:37 +02:00
Denny Schäfer
d1534d4dcf Fix changelog (double issue entry and missing version headline) 2015-09-18 10:46:04 +02:00
Torkel Ödegaard
a8a94ef87b Merge branch 'master' into timepicker2 2015-09-18 10:36:50 +02:00
Torkel Ödegaard
f93215f4ec feat(timepicker2): more progress 2015-09-18 10:36:47 +02:00
ubhatnagar
9af460600d Fixed Indentation and Grunt run. 2015-09-18 00:50:55 -07:00
ubhatnagar
e1576b7131 Added Manage Dashboard Tooltip, hides when clicked. 2015-09-18 00:45:29 -07:00
Torkel Ödegaard
5d05de8bda Merge branch 'master' of github.com:grafana/grafana 2015-09-18 08:39:06 +02:00
Torkel Ödegaard
5e949b0564 fix(quota): fixed failing quota unit tests 2015-09-18 08:36:58 +02:00
Torkel Ödegaard
2a52d9bdf6 feat(timepicker2): more work on new timepicker 2015-09-18 08:17:19 +02:00
Torkel Ödegaard
ebf49d0668 Merge pull request #2771 from decbis/maxSeries-func-for-groupByNode
Added maxSeries option for groupByNode Graphite function
2015-09-18 08:16:46 +02:00
Torkel Ödegaard
f42955ab99 Merge pull request #2755 from Dieken/patch-1
fix duplicate tag value suggestions and relax limit on /api/search/lookup
2015-09-18 08:08:18 +02:00
Mike Kobyakov
5d516592d9 Merge remote-tracking branch 'upstream/master' 2015-09-17 23:07:52 -07:00
Eugen Dinca
7dc2b36413 Added maxSeries option for groupByNode function 2015-09-17 16:50:09 -04:00
Torkel Ödegaard
5eefa36111 feat(timepickerv2): big progress on new design of new timepicker, #2761 2015-09-17 22:44:59 +02:00
Cagdas Cirit
c26209d579 Update debian.md 2015-09-17 18:25:36 +02:00
Torkel Ödegaard
a30f73fe36 feat(timepicker): more work on getting new time formats to work in all data sources 2015-09-17 12:40:04 +02:00
Torkel Ödegaard
1a9c52e17f feat(timepicker): lots of big changes, moving to datemath from kbn.parseDateMath, moving to moment dates instead of native javascript dates 2015-09-17 11:21:38 +02:00
Torkel Ödegaard
5ad38ee9f8 feat(timepicker2): fixed timesrv specs 2015-09-17 09:57:59 +02:00
Torkel Ödegaard
4c79591403 fix(graphite): removed debug comment 2015-09-17 09:45:53 +02:00
Torkel Ödegaard
a9812167d7 feat(timepicker2): worked on more rich time range support 2015-09-17 09:44:51 +02:00
raj dutt
a7cc36f741 Update CHANGELOG.md 2015-09-16 23:50:02 -04:00
raj dutt
4c0262cbd0 Update CHANGELOG.md 2015-09-16 23:46:48 -04:00
Torkel Ödegaard
a0a98cb035 feat(timepicker2): working on richer timepicker options 2015-09-16 19:49:05 +02:00
Liu Yubao
4fccfbf543 increase limit to lookup unique metric tag values
Default limit is 25, which is too small. Since the
/api/search/lookup query currently isn't narrowed down by selected tag
keys and values (see https://github.com/grafana/grafana/pull/1433),
the limit is set to 3000; this should be fine because people rarely
create new graph panels.
2015-09-17 01:36:36 +08:00
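For illustration only, a minimal Go sketch of the kind of lookup request this limit applies to; the OpenTSDB host, the example metric, and the exact query parameter names (`m`, `limit`) are assumptions based on the commit message and the linked PR, not code taken from the repo:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/url"
    )

    func main() {
        // Hypothetical lookup against a placeholder OpenTSDB host: ask for up to
        // 3000 matching time series instead of the default limit of 25.
        q := url.Values{}
        q.Set("m", "sys.cpu.user{host=*}")
        q.Set("limit", "3000")

        resp, err := http.Get("http://opentsdb.example.com:4242/api/search/lookup?" + q.Encode())
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        body, _ := io.ReadAll(resp.Body)
        fmt.Println(string(body))
    }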
Liu Yubao
62ae80eeae deduplicate tag value suggestions for OpenTSDB 2015-09-17 01:25:28 +08:00
Torkel Ödegaard
d705108be5 feat(timepicker2): added date math tests 2015-09-16 18:48:41 +02:00
Torkel Ödegaard
3912eb7b26 Merge branch 'master' into timepicker2 2015-09-16 17:03:08 +02:00
Torkel Ödegaard
1f959272c5 feat(migration): added back support to import old dashboards from Elasticsearch 2015-09-16 16:28:41 +02:00
Torkel Ödegaard
f9cd942363 Merge branch 'master' of github.com:grafana/grafana 2015-09-16 15:17:57 +02:00
Torkel Ödegaard
1282da52eb Merge pull request #2750 from elliot/patch-1
Fixed 404 error for robots.txt
2015-09-16 12:09:56 +02:00
Elliot Anderson
10c099a52d Fixed 404 error for robots.txt 2015-09-16 11:02:50 +10:00
Torkel Ödegaard
fa0542ca8b Merge branch 'quotas' of https://github.com/raintank/grafana into raintank-quotas 2015-09-15 14:33:21 +02:00
woodsaj
3926226417 fix getting default quota as map[string]int64 2015-09-15 20:31:58 +08:00
Torkel Ödegaard
6a30511fc4 Merge branch 'quotas' of https://github.com/raintank/grafana into raintank-quotas 2015-09-15 14:23:13 +02:00
Torkel Ödegaard
4ffa26cf2c feat(): started work on a more feature rich time picker 2015-09-15 13:23:36 +02:00
woodsaj
1ad10914ce add quota middleware unittests 2015-09-15 18:19:47 +08:00
woodsaj
86ed85aa6e move toMap function to be a method on the quota structs 2015-09-15 17:18:26 +08:00
woodsaj
b7de847236 add unittests for quota sqlstore methods. 2015-09-15 17:10:46 +08:00
woodsaj
3cf2cd4684 be sure to pass result obj by reference to xorm. 2015-09-15 17:10:16 +08:00
Torkel Ödegaard
2b95cd5081 refactor: moving routes into core, improved bundle loader 2015-09-15 08:53:06 +02:00
Torkel Ödegaard
8f45324bce fix(): fixed requirejs build 2015-09-15 08:20:26 +02:00
Torkel Ödegaard
5545cdbf4d refactor: improving structure, moving things into a core module 2015-09-15 08:17:40 +02:00
Torkel Ödegaard
9dec50832d refactor: refactoring app boot up and core structure 2015-09-14 22:54:00 +02:00
Torkel Ödegaard
64973f1d57 fix(settings): another attempt at fixing, #2736 2015-09-14 15:54:35 +02:00
Torkel Ödegaard
d7bfb727b0 fix(settings): reverted prev settings fix for detecting public_gen folder in dev, caused issue for prod build 2015-09-14 13:42:23 +02:00
Torkel Ödegaard
37ad58c69e fix(gofmt): somehow api.go did not pass gofmt test 2015-09-14 12:30:30 +02:00
Torkel Ödegaard
de753bf330 fix(build): fixed issue with ngAnnotate not including files in core/*, fixes #2733 2015-09-14 09:33:26 +02:00
Torkel Ödegaard
d17f8538b2 fix(backend): made public_gen detection more bullet proof, #2731 2015-09-13 15:23:23 +02:00
Torkel Ödegaard
1900c81d9f tech(nodejs upgrade): upgraded to nodejs 4.0 for grunt build 2015-09-13 12:13:47 +02:00
Torkel Ödegaard
9db6f82628 refactor: finished moving timepicker to typescript and the directive refactor 2015-09-12 12:52:50 +02:00
Torkel Ödegaard
d96a6a59ee refactor: moved timepicker from a simple panel to component, removed simple panel directive 2015-09-12 11:49:27 +02:00
Torkel Ödegaard
7535677ed4 fix(build): fixed failing tslint test 2015-09-11 21:04:02 +02:00
woodsaj
6488324cf1 enhance quota support.
now includes:
- perOrg (users, dashboards, datasources, api_keys)
- perUser (orgs)
- global (users, orgs, dashboards, datasources, api_keys, sessions)
2015-09-11 23:17:10 +08:00
Torkel Ödegaard
812e4c7cd4 refactor: moved array join directive to typescript 2015-09-11 10:54:56 +02:00
Torkel Ödegaard
85baae1ebd feat(influxdb): added back the fill option to the editor, forgot to add it in the new updated query editor 2015-09-11 10:42:50 +02:00
Torkel Ödegaard
8174b9f041 fix(tests): fixed failing backend test 2015-09-11 08:58:45 +02:00
Torkel Ödegaard
fb9e91e486 Merge branch 'master' of github.com:grafana/grafana 2015-09-11 08:13:49 +02:00
Torkel Ödegaard
1429737a60 tech(typescript): added note about typescript to changelog 2015-09-11 08:11:17 +02:00
Torkel Ödegaard
286b4c0e46 Merge branch 'typescript' 2015-09-11 08:01:11 +02:00
Torkel Ödegaard
166a3c4d64 tech(typescript): added tslint to default task 2015-09-11 08:00:13 +02:00
Torkel Ödegaard
d4a701aad0 tech(typescript): more work on typescript 2015-09-11 07:47:00 +02:00
woodsaj
47bf1bd21a return 404 when quotas not enabled. 2015-09-11 01:51:12 +08:00
woodsaj
852f9bd277 refactor quota settings 2015-09-11 01:47:33 +08:00
woodsaj
555cbeffa5 allow all users to retrieve org and quota data. 2015-09-11 01:18:36 +08:00
woodsaj
3d4d822528 implement updateQuota function 2015-09-11 01:04:29 +08:00
woodsaj
c238130842 quote table names passed by arguments 2015-09-11 01:04:22 +08:00
woodsaj
76e9ebde36 always return after errors. 2015-09-11 01:03:58 +08:00
woodsaj
0688050552 add quota middleware to enforce quotas. issue #321
Conflicts:
	pkg/api/api.go
2015-09-11 01:03:47 +08:00
woodsaj
9023171940 initial backend support for quotas. issue #321
Conflicts:
	conf/defaults.ini
	main.go
	pkg/services/sqlstore/migrations/migrations.go
2015-09-11 01:01:36 +08:00
Torkel Ödegaard
84371f03b5 tech(typescript): more testing and migration 2015-09-10 16:47:38 +02:00
Torkel Ödegaard
20407bca89 tech(typescript): converted signup controller to typescript 2015-09-10 16:21:57 +02:00
Torkel Ödegaard
96af2debfc Merge pull request #2717 from sileht/sileht/dataproxy_test-bug
Fix dataproxy_test.go tests
2015-09-10 15:51:02 +02:00
Mehdi Abaakouk
6e532231dc Fix dataproxy_test.go tests
This change fixes the dataproxy_test.go tests that were failing with:

pkg/api/dataproxy_test.go:17: not enough arguments in call to NewReverseProxy
pkg/api/dataproxy_test.go:39: not enough arguments in call to NewReverseProxy
FAIL	_/home/ubuntu/grafana/pkg/api [build failed]
2015-09-10 13:27:49 +00:00
Torkel Ödegaard
dceec44671 removed tsconfig 2015-09-10 14:36:05 +02:00
Torkel Ödegaard
0b5f40e66c tech(): made config system check for generated css or javascript files and panic if there are none, also if there is a public_gen directory it will use that, even if static root is set to public 2015-09-10 13:34:32 +02:00
Torkel Ödegaard
da832368f0 dev building and optimized builds work 2015-09-10 12:42:24 +02:00
Torkel Ödegaard
abac8bccc6 tech(typescript): its looking good 2015-09-10 11:26:40 +02:00
Torkel Ödegaard
82061c7c3b experiments 2015-09-09 20:37:27 +02:00
Torkel Ödegaard
ec3a684be2 Merge pull request #2711 from jsternberg/master
Update docstring for postgres session provider
2015-09-09 20:17:23 +02:00
Jonathan A. Sternberg
5572588c54 Update docstring for postgres session provider
The postgres provider is named postgres and not postgresql. For somebody
configuring the server from the config file example, it is very easy to
write an invalid value into the file and accidentally use the "memory"
provider instead because of a typo.
2015-09-09 14:06:14 -04:00
Torkel Ödegaard
a8197df1c1 more experiments for mixing javascript and typescript 2015-09-09 19:34:24 +02:00
Torkel Ödegaard
005e1e002b more playing around with typescript 2015-09-09 17:40:52 +02:00
Torkel Ödegaard
9603dce469 feat(dataproxy): added whitelist setting and feature for data proxies, closes #2626 2015-09-09 17:21:25 +02:00
Torkel Ödegaard
13190f6f15 fixed version in changelog 2015-09-09 14:25:59 +02:00
Torkel Ödegaard
eaa061d90c cleanup(): removed old influxdb editor partial 2015-09-09 14:18:56 +02:00
Torkel Ödegaard
1ab374008f feat(influxdb): completed work on new influxdb query editor, now supports #2708, #2647, #2599 2015-09-09 14:17:55 +02:00
Torkel Ödegaard
8c9d551826 feat(influxdb): you can now change the group by time interval on a per query basis, #2647 2015-09-09 13:48:57 +02:00
Torkel Ödegaard
c4c3f9dc1f feat(influxdb): more work on changing the influxdb editor to support better aliasing and interval options, #2647, #2599 2015-09-09 13:39:45 +02:00
Torkel Ödegaard
5b722deb39 feat(influxdb): began work on changing the influxdb editor to support better aliasing and interval options, #2647, #2599 2015-09-09 11:59:02 +02:00
Torkel Ödegaard
b61b7f0c3b fix(influxdb): fixed editor bug introduced in recent commit 2015-09-09 11:05:49 +02:00
Torkel Ödegaard
3c40310e9b tech(typescript): testing for how to migrate to typescript 2015-09-09 09:57:06 +02:00
Torkel Ödegaard
7a167742d0 Merge pull request #2701 from brunoqc/patch-1
Fix typo
2015-09-08 22:31:58 +02:00
Bruno Bigras
d2a798eb3d Fix typo 2015-09-08 16:16:51 -04:00
Torkel Ödegaard
3e9e34afb8 feat(annotations): polished up the annotation editor tabs, similar to recent commits for templating editor 2015-09-08 16:59:39 +02:00
Torkel Ödegaard
26d8a041ef fix(templating): updated templating UI and tab behavior, now edit tab is not visible when not actually editing an existing var, fixes #2679 2015-09-08 15:54:08 +02:00
Torkel Ödegaard
7c25edc7b2 docs(): fixed iframe snapshot links in docs, fixes #2682 2015-09-08 15:24:20 +02:00
Torkel Ödegaard
2725053c82 docs(): fixed iframe snapshot links in docs, fixes #2682 2015-09-08 15:19:59 +02:00
Torkel Ödegaard
e2cb66f8d7 fix(logging): removed temp dev logging code 2015-09-08 14:32:25 +02:00
Torkel Ödegaard
fad1d4cf98 feat(organization): added update org address to http api and to org details settings view, closes #2672 2015-09-08 14:22:44 +02:00
Torkel Ödegaard
daf64421f2 fix(api): Added error handling to create and update org http apis and sql update handlers, now checks for org name taken scenarios and returns correct http error code and message, fixes #2686 2015-09-08 13:06:18 +02:00
Torkel Ödegaard
fa3329271d fix(email notifications): added error handling to email template parsing, fixes #2690 2015-09-08 10:57:47 +02:00
Torkel Ödegaard
fdcb4473af fix(api auth): return 401 for authentication errors and 403 for access denied errors, fixes #2693 2015-09-08 10:46:31 +02:00
Torkel Ödegaard
41154d6d11 fix(elasticsearch): fixed series naming & aliasing when using field for extended_stats and percentiles 2015-09-08 10:08:27 +02:00
Torkel Ödegaard
9904d01958 fix(unit test): trying to fix failing unit test due to timezone different on build server 2015-09-08 09:50:50 +02:00
Torkel Ödegaard
171ed497f9 version bumped to 2.5 2015-09-08 09:20:46 +02:00
Torkel Ödegaard
212a8ad6a6 Merge branch 'master' of github.com:grafana/grafana 2015-09-08 09:18:03 +02:00
Torkel Ödegaard
f2c518ba24 Merge branch 'elastic_ds'
Conflicts:
	public/app/plugins/datasource/influxdb/queryCtrl.js
2015-09-08 09:17:34 +02:00
Torkel Ödegaard
f8b61a4ebe changelog(): added #1034 to changelog 2015-09-08 09:16:41 +02:00
Torkel Ödegaard
35cc3837a0 feat(elasticsearch): more work on alias pattern, #1034 2015-09-08 09:10:26 +02:00
Torkel Ödegaard
572a80d1d1 feat(elasticsearch): metric response handling and processing now supports alias patterns, {{term field name}} and {{metric}} now work, #1034 2015-09-07 23:15:49 +02:00
Torkel Ödegaard
2aa695fb66 feat(elasticsearch): refactoring elasticsearch response handling to support series alias patterns 2015-09-07 20:00:27 +02:00
Torkel Ödegaard
f361f324da feat(elasticsearch): more polish to editor, made interval configurable per query, #1034 2015-09-07 16:35:40 +02:00
Torkel Ödegaard
3999a3caa2 feat(elasticsearch): extended stats like std deviation now work, and the sigma option as well, added unique count (cardinality) as well, #1034 2015-09-07 13:13:27 +02:00
Torkel Ödegaard
efc3def7f2 feat(elasticsearch): small refactoring and polish 2015-09-07 09:36:56 +02:00
Torkel Ödegaard
6c304924f7 feat(elastic_ds): moving time field name to datasource option, it is no longer specified for each query and date_histogram 2015-09-07 08:57:46 +02:00
Torkel Ödegaard
f90714f8fe feat(elasticsearch): changed default sort to asc 2015-09-06 16:11:01 +02:00
Torkel Ödegaard
0960360b35 feat(elasticsearch): added support for index time based patterns, #1034 2015-09-06 16:09:42 +02:00
Torkel Ödegaard
8db47e335f fixed jshint errors 2015-09-06 14:45:12 +02:00
Torkel Ödegaard
c609f67e16 fixed broken editors because of some recent refactorings 2015-09-06 14:29:28 +02:00
Torkel Ödegaard
14cb2b0143 began work on supporting index time patterns 2015-09-06 12:58:53 +02:00
Torkel Ödegaard
b24c539206 feat(elasticsearch): began work on supporting extended stats metric agg, it gives you standard deviation and more 2015-09-05 20:22:54 +02:00
Torkel Ödegaard
52eeefa6d9 feat(elasticsearch): fields are fetched from mapping instead of docs, you can enter a custom value in field options, other fixes, #1034 2015-09-05 19:55:58 +02:00
Torkel Ödegaard
f942ec952e feat(elasticsearch): worked on percentiles metric aggregator in editor and in elasticsearch response processing 2015-09-05 18:31:42 +02:00
Torkel Ödegaard
3e9aca3ed4 feat(elasticsearch): terms aggregation options are working, things are starting to come together, #1034 2015-09-05 15:41:04 +02:00
Torkel Ödegaard
2d832e10b0 feat(elasticsearch): term metric filters are starting to work! like terms aggregation with top 5, order by metric 1 desc, where metric 1 is maybe average of @load 2015-09-05 12:51:05 +02:00
Torkel Ödegaard
f1e995ec79 feat(elasticsearch): added bucket agg id concept 2015-09-05 12:24:14 +02:00
Torkel Ödegaard
756ec8ccd7 feat(elasticsearch): close to getting group by term options ui working 2015-09-05 10:48:11 +02:00
Torkel Ödegaard
c48f24d269 feat(editor): more work on editor components, extracting things and making reusable directives 2015-09-05 10:14:21 +02:00
Torkel Ödegaard
0d2e13549a feat(editor): things are starting to work again 2015-09-05 09:05:09 +02:00
Torkel Ödegaard
f9ce9bdcec feat(editor): refactoring and making new editor abstractions 2015-09-05 08:07:40 +02:00
Torkel Ödegaard
e339dbf473 feat(elasticsearch): so much work on new editor, it's pretty broken right now, but when it is done it is going to be amazing 2015-09-04 22:10:56 +02:00
Torkel Ödegaard
f27f028d44 Merge pull request #2676 from jd/master
doc: fix link to basic concepts
2015-09-04 17:34:01 +02:00
Julien Danjou
697aaf7e70 partials: fix closing markup in datasourceHttpConfig 2015-09-04 17:14:27 +02:00
Torkel Ödegaard
cc1e3d0101 feat(elasticsearch): groundwork for a much more sophisticated elasticsearch query editor 2015-09-04 16:05:47 +02:00
Torkel Ödegaard
9daa3997e9 feat(elasticsearch): time field selector now works, #1034 2015-09-04 11:17:52 +02:00
Torkel Ödegaard
83930ec9d1 feat(elasticsearch): raw queries work, more unit tests and polish, #1034 2015-09-04 09:41:23 +02:00
Torkel Ödegaard
b83a1bf4cc Merge pull request #2673 from rodo/add_doc_app_mode
Add comments on app_mode with possible values
2015-09-04 09:25:44 +02:00
Rodolphe Quiédeville
668b47cc6e Add comments on app_mode with possible values 2015-09-04 09:01:06 +02:00
Torkel Ödegaard
f29471521c Merge pull request #2663 from victorhooi/patch-1
Add note about phantomjs binary and PNG rendering.
2015-09-04 08:40:53 +02:00
Torkel Ödegaard
5beced458c fix(): fixed problems in last commit 2015-09-04 07:41:50 +02:00
Torkel Ödegaard
ead451a979 feat(influxdb): More alias options, can now use syntax to reference part of a measurement name (separated by dots), closes #2599 2015-09-04 07:40:28 +02:00
Torkel Ödegaard
97d42991a7 fix(graph tooltip): fixed graph tooltip when stacking and one series is not stacked (via override), fixes #2670 2015-09-03 21:18:05 +02:00
Torkel Ödegaard
cd6bdc1a78 fix(influxdb): fixed influxdb template var filter suggestion, fixes #2666 2015-09-03 20:53:20 +02:00
Torkel Ödegaard
977f538420 feat(elasticsearch): lots of work on elasticsearch metrics query editor, #1034 2015-09-03 16:35:11 +02:00
Torkel Ödegaard
590b155c6c feat(elasticsearch): lots of work on elasticsearch metrics query editor, #1034 2015-09-03 15:56:41 +02:00
Torkel Ödegaard
7e9f11ea1c feat(elasticsearch): lots of work on elasticsearch metrics query editor, #1034 2015-09-03 14:55:48 +02:00
Torkel Ödegaard
64cee145e0 Merge pull request #2669 from jd/master
doc: fix link to basic concepts
2015-09-03 14:03:04 +02:00
Torkel Ödegaard
525724cc1f feat(elasticsearch): lots of work on elasticsearch metrics query editor, #1034 2015-09-03 14:02:31 +02:00
Julien Danjou
63f9dc826f doc: fix link to basic concepts 2015-09-03 14:01:32 +02:00
Torkel Ödegaard
b3bda02063 feat(elasticsearch): lots of work on elasticsearch metrics processing, handling grouped responses, etc, #1034 2015-09-03 12:35:21 +02:00
Torkel Ödegaard
df1d56e7b1 feat(elasticsearch): lots of work on elasticsearch metrics processing, handling grouped responses, etc, #1034 2015-09-03 11:14:25 +02:00
Victor Hooi
b80adcc00a Add note about phantomjs libraries. 2015-09-03 16:35:49 +10:00
Victor Hooi
68a7c3fa3b Add note about phantomjs binary and PNG rendering. 2015-09-03 16:29:58 +10:00
Torkel Ödegaard
d099d8950f feat(elasticsearch): lots of work on elasticsearch metrics queries, #1034 2015-09-03 08:18:00 +02:00
Torkel Ödegaard
ae7093e0bb feat(elasticsearch): small progress on new elasticsearch metric query capability 2015-09-02 17:45:41 +02:00
Torkel Ödegaard
d932a877e8 fix(influxdb): removed limit of 20 for influxdb field dropdown, fixes #2655 2015-09-02 16:10:58 +02:00
Torkel Ödegaard
745a7b4461 fix(influxdb): Fixed issue when using the eye to disable queries in the query editor and when applying aliases, #2651 2015-09-02 16:06:28 +02:00
Torkel Ödegaard
9e1a9723c2 Merge branch 'cloudwatch' into elastic_ds 2015-09-02 15:25:45 +02:00
Torkel Ödegaard
e48754c73c feat(elasticsearch): began work on elasticsearch datasource, based on work in pr #2293, will need a lot more work 2015-09-02 15:25:40 +02:00
Torkel Ödegaard
096a554bfb Merge branch 'elasticsearch-datasource' of https://github.com/aheinz-sg/grafana into elastic_ds 2015-09-02 15:12:26 +02:00
Torkel Ödegaard
74da5a610c fix(spelling): capitalize text 2015-09-02 13:20:19 +02:00
Torkel Ödegaard
ea7c6edcd0 feat(cloudwatch): lots of refactoring and polish work on cloudwatch query editor 2015-09-02 12:40:08 +02:00
Torkel Ödegaard
27d5f02329 refactoring(query editors): broke out metric segment 2015-09-02 11:33:32 +02:00
Torkel Ödegaard
d56e7787f2 Merge pull request #2637 from matschaffer/env-var-output-fix
Iterate over the right env override list variable
2015-09-02 11:32:49 +02:00
Torkel Ödegaard
6d7c8431be fix(inspector): lots of improvements and fixes for the error inspector, now shows you request details and responses in many more cases, fixes #2646 2015-09-02 10:35:15 +02:00
Torkel Ödegaard
3ed63d09d9 Merge branch 'master' into cloudwatch 2015-09-01 17:25:36 +02:00
Torkel Ödegaard
4b4299604b fix(import): fixed nav link in header, fixes #2633 2015-09-01 17:14:48 +02:00
Torkel Ödegaard
3842bcb921 fix(influxdb): quote field name, fixes #2629 2015-09-01 14:49:42 +02:00
Torkel Ödegaard
f9b98767e7 docs(reference): removed link to non existent text panel page, fixes #2632 2015-09-01 13:07:42 +02:00
Torkel Ödegaard
6989c6332d fix(influxdb): fixed issue in last commit 2015-09-01 13:04:59 +02:00
Torkel Ödegaard
209e9ebda7 fix(influxdb): fixes and refactorings of influxdb 0.9 editor, no longer shows template vars in keys dropdown and group by dropdown, fixes #2636 2015-09-01 13:01:21 +02:00
Torkel Ödegaard
e27e9dc2aa Merge pull request #2642 from bfontaine/patch-1
influxdb doc: typo fixed
2015-09-01 12:59:19 +02:00
Baptiste Fontaine
28474d9340 influxdb doc: typo fixed 2015-09-01 12:47:26 +02:00
Torkel Ödegaard
aa89416bca fix(invite): fixes to org invite stuff, #2630 2015-09-01 12:35:06 +02:00
Mat Schaffer
6c04ee1abd Iterate over the right env override list variable 2015-09-01 11:11:54 +09:00
Torkel Ödegaard
e93fba206d fix(kairosdb): fixed how kairosdb makes datasource requests, fixes #2628 2015-08-31 19:13:45 +02:00
Torkel Ödegaard
822a689b82 datasource(cloudwatch): began on polishing cloudwatch datasource, #684, #2445 2015-08-31 19:07:25 +02:00
Torkel Ödegaard
ffbf70af25 Merge branch 'cloudwatch' of https://github.com/mtanda/grafana into cloudwatch 2015-08-31 15:45:49 +02:00
Adam Heinz
d3e307b102 Refactor post-rebase to configure directives. 2015-08-31 09:32:47 -04:00
Adam Heinz
a3e4abfd5e Added group by to Elasticsearch data source. 2015-08-31 09:04:56 -04:00
Adam Heinz
56d1411253 Replace deprecated facets with aggregations. 2015-08-31 08:53:49 -04:00
Adam Heinz
7bccd17bbe Enable 'Test Connection' for Elasticsearch datasource. 2015-08-31 08:53:49 -04:00
Adam Heinz
c193208cd2 Rewrite query builder to allow for multiple time series. 2015-08-31 08:53:49 -04:00
Joseph Jones
923f9345a7 add elasticsearch query fields to the es query editor 2015-08-31 08:53:49 -04:00
Adam Heinz
d618526037 Copy/paste influxdb query editor. 2015-08-31 08:53:49 -04:00
Adam Heinz
a1dcd5f069 Initial prototype returning time series from (partially hardcoded) Elasticsearch data source. 2015-08-31 08:53:29 -04:00
Torkel Ödegaard
3dfa28570f ui(timepicker): slight polish to the time picker options view 2015-08-31 14:12:10 +02:00
Torkel Ödegaard
d78c9fa2d2 feat(signup): updated changelog with details about #2353 2015-08-31 11:47:02 +02:00
Torkel Ödegaard
6fac241404 Merge branch 'signup_remake' 2015-08-31 11:42:30 +02:00
Torkel Ödegaard
99bb9d4fcf feat(signup): added back the welcome on signup completed email 2015-08-31 11:42:12 +02:00
Torkel Ödegaard
d19e101e6b feat(signup): almost done with new sign up flow, #2353 2015-08-31 11:35:07 +02:00
Torkel Ödegaard
13c70ac02c feat(signup): selecting org after invite now works 2015-08-31 09:37:14 +02:00
Mitsuhiro Tanda
3405e44a1d add legend format tip 2015-08-31 16:03:39 +09:00
Mitsuhiro Tanda
8c5a28c0b8 improve dollar escape 2015-08-31 16:03:28 +09:00
Mitsuhiro Tanda
7229c59b8e escape {} in tip 2015-08-31 16:02:59 +09:00
Torkel Ödegaard
14884d5a2b feat(signup): progress on new signup flow, #2353 2015-08-30 18:56:53 +02:00
Torkel Ödegaard
688ed405df fix(graph tooltip): multi series tooltip did not highlight correct point when stacking was enabled and series were of different resolution, fixes #2620 2015-08-30 11:11:43 +02:00
Torkel Ödegaard
d3c79c9b49 fix(datasource query editors): fixed issue with duplicate query and the query letter (refId) 2015-08-30 10:24:15 +02:00
Torkel Ödegaard
de0f04ec3c feat(signup): progress on new sign up and email verification flow, #2353 2015-08-28 15:14:24 +02:00
Torkel Ödegaard
c61b22cefb feat(signup): progress on new sign up and email verification flow, #2353 2015-08-28 13:45:16 +02:00
Torkel Ödegaard
24dfa55465 feat(signup): progress on new sign up and email verification flow, #2353 2015-08-28 09:24:30 +02:00
Torkel Ödegaard
d25624a8ad feat(signup): began work on new / alternate signup flow that includes email verification, #2353 2015-08-27 13:59:58 +02:00
Torkel Ödegaard
41f1e5f7c9 Merge pull request #2605 from matschaffer/patch-1
Use SPDX license id
2015-08-27 08:49:52 +02:00
Mat Schaffer
be3f657073 Use SPDX license id
This avoids an npm warning telling you to use such an id.
2015-08-27 07:35:59 +09:00
Torkel Ödegaard
7e44a8ed1d feat(tagmanager): added Name variable to datalayer 2015-08-26 15:07:39 +02:00
Torkel Ödegaard
874503ca68 fix(ngblur): removed ngblur directive as it conflicted with angular native directive, fixes #2429 2015-08-26 13:05:33 +02:00
Torkel Ödegaard
29c833d623 Merge pull request #2598 from robison/master
updating the limit of returned tagvs in a MetricKeyLookup
2015-08-26 09:48:44 +02:00
robbie
46c0215b1f updating the limit of returned tagvs in a MetricKeyLookup since opentsdb supports limit in 2.1.0 2015-08-25 12:46:54 -07:00
Mitsuhiro Tanda
7c6e49ec65 fix too many CloudWatch queries 2015-08-26 00:31:59 +09:00
Torkel Ödegaard
9a142cea7a fix(panel links): fixed panel height issue when using panel links, could cause strange panel flow, fixes #2576 2015-08-25 15:49:46 +02:00
Torkel Ödegaard
7b911aea46 fix(shutdown flow): improved shutdown flow and log closing, listening to kill and SIGTERM as well, closes #2516 2015-08-25 15:22:24 +02:00
Torkel Ödegaard
af95daadf4 fix(jscs): fixed failing js style check 2015-08-25 09:39:42 +02:00
Torkel Ödegaard
6e8d5cd873 fix(opentsdb): blur event triggered twice for metric selector, caused double query to opentsdb after metric name change 2015-08-25 09:34:43 +02:00
Torkel Ödegaard
48686cf9f7 fix(influxdb_09): fix for handling empty series object in response from influxdb, fixes #2413 2015-08-25 09:11:39 +02:00
Mitsuhiro Tanda
b7fc3059b6 add legend format 2015-08-25 15:51:32 +09:00
Mitsuhiro Tanda
64ce5e0fad add tooltip 2015-08-25 13:35:27 +09:00
Mitsuhiro Tanda
d815d06c1c ignore empty custom metrics setting 2015-08-24 22:13:53 +09:00
Mitsuhiro Tanda
c0c8465ec2 add loading custom metrics definitions 2015-08-24 22:06:45 +09:00
Torkel Ödegaard
364d9de751 feat(ui viewer): prevent viewers from creating new dashboard or importing dashboard, closes #2590 2015-08-24 14:24:10 +02:00
Torkel Ödegaard
ca5e8c73d7 logging(ldap): added more logging to bind failures, #2588 2015-08-24 11:47:22 +02:00
Torkel Ödegaard
3e0c66edab docs(): updated download links in docs 2015-08-24 10:05:05 +02:00
Mitsuhiro Tanda
e74be5887c don't require cloudwatch dimension 2015-08-24 11:38:52 +09:00
Mitsuhiro Tanda
253d0c834c change required option by access mode 2015-08-23 14:54:08 +09:00
Mitsuhiro Tanda
feb19adb8f fix checkbox 2015-08-23 14:53:58 +09:00
Torkel Ödegaard
5d69c69b7c Merge pull request #2586 from thuck/link_typo
Fix small typo "dashbord" to dashboard
2015-08-22 14:48:13 +02:00
Denis Doria
06077faa6a Fix small typo "dashbord" to dashboard 2015-08-22 11:48:35 +02:00
Torkel Ödegaard
fcc369e854 Merge branch 'v2.1.x'
Conflicts:
	CHANGELOG.md
2015-08-22 08:44:47 +02:00
Torkel Ödegaard
ea187961da fix(templating): Another attempt at fixing #2534 (Init multi value template var used in repeat panel from url), fixes #2564
Conflicts:
	CHANGELOG.md
2015-08-22 08:43:43 +02:00
Torkel Ödegaard
c9b2cec5ff fix(packaging): deb & rpm packages did not mark the ldap.toml config file as a configuration file, the 2.1.1 & 2.1.2 upgrades overwrote it :(, fixes #2580 2015-08-22 08:34:20 +02:00
Torkel Ödegaard
960a4f71a3 fix(jscs): fixed javascript indenting 2015-08-21 16:28:01 +02:00
Torkel Ödegaard
292db86c9e feat(relative time override): You can now use the new relative time option in panel time override, closes #2575 2015-08-21 16:19:51 +02:00
Torkel Ödegaard
6d3b36d61b fix(annotations): removed accidental test code from annotationsSrv 2015-08-21 15:32:20 +02:00
Torkel Ödegaard
c876aa537a feat(panel resize): Resize handles in panel bottom right corners for easy width and height change, closes #2577
Conflicts:
	public/app/features/panel/panelDirective.js
2015-08-21 14:59:15 +02:00
Mitsuhiro Tanda
f1e5238e16 escape dimension if it is variable name 2015-08-21 20:15:45 +09:00
Mitsuhiro Tanda
d75d4a5c08 filter empty dimension 2015-08-21 18:29:38 +09:00
Mitsuhiro Tanda
266fe876ba pass dimension in metricFindQuery 2015-08-21 18:29:32 +09:00
Torkel Ödegaard
ca53ae4fc5 fix(templating): Another attempt at fixing #2534 (Init multi value template var used in repeat panel from url), fixes #2564 2015-08-21 10:40:14 +02:00
Torkel Ödegaard
8f35683ccb fix(annotations): Fixed issue when html sanitizer fails for annotation title or body, now falls back to html escaping title and text, fixes #2563 2015-08-21 10:17:02 +02:00
Torkel Ödegaard
30cd782e92 fix(snapshot): Fix for snapshot with expire 7 days option, the 7 days option was not correct, it was 7 hours, fixes #2574 2015-08-21 09:51:16 +02:00
Torkel Ödegaard
3d37c9c9a3 feat(tagmanager): support to add google tagmanager id, closes #2569 2015-08-21 09:30:39 +02:00
Torkel Ödegaard
7072af7c14 fix(auth proxy): Fix for server side rendering of panel when using auth proxy, fixes #2568 2015-08-21 07:49:49 +02:00
Mitsuhiro Tanda
c72bddf15e add cloudwatch datasource test 2015-08-21 12:55:18 +09:00
Torkel Ödegaard
abd7c15ba8 fix(TimePicker): Fix for when you applied a custom time range it did not refresh the dashboard, fixes #2565 2015-08-20 17:57:15 +02:00
Torkel Ödegaard
a4ff1e4f33 docs(): updated version in install docs 2015-08-20 15:20:26 +02:00
Torkel Ödegaard
5b9ea96dc5 Merge branch 'v2.1.x'
Conflicts:
	CHANGELOG.md
	package.json
2015-08-20 11:16:05 +02:00
Torkel Ödegaard
e86c87f7bc updated version to v2.1.2 2015-08-20 11:12:32 +02:00
Torkel Ödegaard
e5794ed1c1 fix(dragdrop): Fix for broken drag drop behavior, fixes #2558 2015-08-20 11:12:06 +02:00
Mitsuhiro Tanda
269441fe3d add aws-sdk 2015-08-20 00:51:30 +09:00
Mitsuhiro Tanda
00f76ecaf6 CloudWatch proxy support 2015-08-20 00:51:23 +09:00
Torkel Ödegaard
952a69abca docs(tutorial): updated hubot tutorial 2015-08-19 13:53:15 +02:00
Torkel Ödegaard
aa21bfd8a8 fix(timepicker): fix for viewing the auto refresh submenu when the timepicker selection is set to Today, fixes #2552 2015-08-19 13:25:36 +02:00
Torkel Ödegaard
5768762591 Merge pull request #2539 from papylhomme/master
Disable load timeout in requirejs
2015-08-18 20:23:47 +02:00
Torkel Ödegaard
ca9c11486e config(github oauth): removed allowed_domains options, closes #1986 2015-08-18 20:19:37 +02:00
Torkel Ödegaard
01d9849e44 fix(user create): fix for creating multiple users with empty email when auto assign org is set to false, fixes #2011 2015-08-18 20:15:24 +02:00
Torkel Ödegaard
0339026674 fix(default datasource): minor fix for handling of default datasource 2015-08-18 20:04:24 +02:00
Trent White
2f5115ca45 Update hubot_howto.md 2015-08-18 09:37:51 -04:00
Trent White
66878a9b48 Update hubot_howto.md 2015-08-18 09:34:31 -04:00
Trent White
333952a231 Update hubot_howto.md 2015-08-18 09:34:08 -04:00
Torkel Ödegaard
50281f12b9 fix(default datasource): minor fix for handling of default datasource 2015-08-18 14:54:56 +02:00
Torkel Ödegaard
4f8cea6e2d docs(): added tutorial article on hubot and grafana integration 2015-08-18 14:39:04 +02:00
Mitsuhiro Tanda
1c6b7203cc add template variable to drop down list 2015-08-18 20:35:12 +09:00
Torkel Ödegaard
bf5081ec8c fix(type ahead): fixed highlight, and made the highlight more distinct, also rolled back #2436 due to its issues on linux and windows, fixes #2527 2015-08-18 10:09:12 +02:00
Torkel Ödegaard
4f7587492d docs(changelog): fixed minor wording issue in changelog 2015-08-18 08:24:08 +02:00
Torkel Ödegaard
9a9c9b2b12 Merge branch 'query-editor-breakout'
Conflicts:
	CHANGELOG.md
2015-08-18 08:23:13 +02:00
Torkel Ödegaard
d4432ddd64 feat(mixed datasources): feature ready to merge to master, closes #436 2015-08-18 08:22:00 +02:00
Torkel Ödegaard
5c2d49f7ce feat(mixed datasource): updated Elasticsearch so it uses new way to define annotations editor 2015-08-18 07:58:44 +02:00
Mitsuhiro Tanda
28ccd63255 fix templating 2015-08-18 13:29:17 +09:00
Michaël Lhomme
0dc263d919 Disable load timeout in requirejs 2015-08-17 21:41:04 +02:00
Torkel Ödegaard
3de041a411 feat(mixed datasource): added datasource name to left of query hamburger #436 2015-08-17 21:25:08 +02:00
Torkel Ödegaard
00f5f8b2a0 feat(mixed datasource): polishing feature, making sure everything works, #436 2015-08-17 21:20:09 +02:00
Torkel Ödegaard
95f1343a59 feat(mixed datasource): fixing various issues with the query editor changes, updated kairosdb data source editor to work with the new model, #436 2015-08-17 20:53:40 +02:00
Torkel Ödegaard
b4115b0362 feat(query editor): updated influxdb 0.8.x data source query editors to new abstraction 2015-08-17 17:28:03 +02:00
Torkel Ödegaard
6ee0f2c6a7 feat(mixed data source queries): lots of minor polish to new mixed data source and all the changes it has required, #436 2015-08-17 17:07:33 +02:00
Torkel Ödegaard
af39e4de3e Merge branch 'v2.1.x'
Conflicts:
	CHANGELOG.md
2015-08-17 15:02:47 +02:00
Torkel Ödegaard
0d8303cf5d fix(templating): fix for setting template variable value via url and having repeated panels or rows, fixes #2534 2015-08-17 15:01:16 +02:00
Torkel Ödegaard
4a6b5274bc feat(invite): fixes for org invite enhancement story, #2353 2015-08-17 10:55:52 +02:00
Torkel Ödegaard
f7ea420a3f Merge pull request #2521 from mattttt/email-tweaks
Email tweaks
2015-08-17 10:33:06 +02:00
Torkel Ödegaard
56d5b0b12a feat(mixed datasource): minor progress 2015-08-17 10:31:54 +02:00
Torkel Ödegaard
bb81248eaa Merge pull request #2528 from thuck/permissions
Fixing permission issues for new installations
2015-08-17 10:13:12 +02:00
Torkel Ödegaard
0e18eafcfb Merge pull request #2535 from raintank/logging
close all existing loggers before re-initializing loggers. fixes #2533
2015-08-17 10:05:42 +02:00
woodsaj
c138f390ac close all existing loggers before re-initializing loggers. fixes #2533 2015-08-17 15:59:40 +08:00
Torkel Ödegaard
a16c63a43e feat(mixed datasources): continued work on editor design change 2015-08-16 20:52:30 +02:00
Denis Doria
9e21a089d2 Fixing permission issues for new installations
A umask of 0027 will generate permissions like:
0640 - for files
0750 - for directories

This should stop grafana.db from being accessible by any user on new installations.
2015-08-16 18:43:27 +02:00
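A minimal sketch, not from the repo, written here only to illustrate the arithmetic the commit message describes (a 0027 umask masking the usual 0666/0777 create modes):

    package main

    import "fmt"

    func main() {
        // A process typically asks for 0666 on new files and 0777 on new directories;
        // the kernel clears every permission bit that is set in the umask.
        const umask = 0027
        fmt.Printf("files: %04o\n", 0666&^umask) // prints 0640
        fmt.Printf("dirs:  %04o\n", 0777&^umask) // prints 0750
    }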
Torkel Ödegaard
3105215425 feat(mixed datasource): changed how query ref ids (letters) are assigned, now they are persisted 2015-08-16 11:24:32 +02:00
Torkel Ödegaard
e916f93787 feat(mixed datasource): fixed failing unit tests 2015-08-16 10:06:36 +02:00
Torkel Ödegaard
1332ddbc93 feat(mixed datasources): updated OpenTSDB data source query editor to new model, #436 2015-08-16 09:52:45 +02:00
Torkel Ödegaard
b30dfcf28a feat(datasource): added new mixed data source 2015-08-16 01:34:09 +02:00
Torkel Ödegaard
b9cfade18c feat(multi db query): major changes to query editor and data source handling, looks promising 2015-08-15 23:11:37 +02:00
Torkel Ödegaard
ad1fa110ff change(search): opening search dropdown should not take you out of fullscreen view or edit 2015-08-15 21:57:41 +02:00
Torkel Ödegaard
ed49962120 refactor(): began work on big design change for how data source query editors are loaded 2015-08-15 21:49:30 +02:00
Matt Toback
0651f134e4 Backed our new styles out of ink.css and moved them into style.css. Small margin tweaks, looking good to go. 2015-08-14 15:21:51 -04:00
Torkel Ödegaard
16fa5c4df3 fix(mysql): fix for migration in newly added temp_user table, fixes #2509 2015-08-14 09:41:07 +02:00
Torkel Ödegaard
2f849be9d8 docs(): minor tweaks to new tutorial 2015-08-14 09:41:07 +02:00
Torkel Ödegaard
6c49ab932e Merge pull request #2511 from pivotal-cloudops/patch
fix test connection bug
2015-08-14 09:23:14 +02:00
Pivotal
dc9c2773cb fix test connection bug 2015-08-14 13:50:01 +08:00
Matt Toback
60eec49e95 Added in additional text and incorporated the bulletproof button that was built for raintank. Needs some help from Torkel or Nick to make sure it's cleaner 2015-08-13 17:57:58 -04:00
Torkel Ödegaard
f784551e20 docs(): Began work on stack install and config tutorial 2015-08-13 08:52:50 +02:00
Torkel Ödegaard
a3d9169930 docs(): minor update 2015-08-13 08:00:27 +02:00
Torkel Ödegaard
e87502285b Merge pull request #2502 from mtanda/stack_override_tooltip
don't calculate cumulative value if series overrides stack to false
2015-08-13 07:44:27 +02:00
Mitsuhiro Tanda
caccacf52b don't calculate cumulative value if series overrides stack to false 2015-08-13 13:40:16 +09:00
Torkel Ödegaard
5aee981590 Merge branch 'master' of https://github.com/mattttt/grafana
Conflicts:
	docs/sources/installation/debian.md
	docs/sources/installation/ldap.md
2015-08-12 16:30:48 +02:00
Torkel Ödegaard
e6d09b3266 fix(db): remove stars and tags when removing user or dashboard, fixes #2016 2015-08-12 09:23:46 +02:00
Torkel Ödegaard
8fcaa4997d feat(admin): Deleting org from orgs list now works, will permanently delete dashboards, data sources, etc, closes #2457 2015-08-12 08:59:39 +02:00
Torkel Ödegaard
7370af9a82 Merge pull request #2492 from agilgur5/master
docs(ldap): Clarify LDAP features and sample config
2015-08-12 08:20:00 +02:00
Anton Gilgur
e4baef94bc docs(ldap): Clarify LDAP features and sample config
* Clarify certain features, such as multiple mappings
* Fixup ldap.toml sample config
* Fixup docs README's port number
* Fixup bad link from LDAP docs to Configuration docs
* Fixup some spelling, grammar, and line endings
2015-08-11 19:57:53 -07:00
Trent White
8c05f9cf3e Merge pull request #11 from mattttt/trent-update
add text panel page
2015-08-11 16:52:15 -04:00
Trent White
ebcab0f26e add text panel page 2015-08-11 16:50:33 -04:00
Matt
14996bd08b Update templating.md 2015-08-11 15:57:36 -04:00
Matt
dd9fb9d526 Update templating.md 2015-08-11 15:56:18 -04:00
Matt
f3ffed673b Merge pull request #10 from mattttt/build-patch-1
Reformatted a confusing part of the running grafana locally section
2015-08-11 15:47:38 -04:00
Matt Toback
5b342dd229 Reformatted a confusing part of the running grafana locally section 2015-08-11 15:41:12 -04:00
Trent White
e626bbf415 Merge pull request #9 from mattttt/trent-update
formatting tweak, added datasource overview blurb.
2015-08-11 15:31:03 -04:00
Trent White
04795709a3 formatting tweak, added datasource overview blurb. 2015-08-11 15:30:13 -04:00
Torkel Ödegaard
745162c589 fix(graphite): Import dashboard from graphite is working again, fixes #2490 2015-08-11 21:20:20 +02:00
Torkel Ödegaard
089508db8e fix(influxdb 0.9): removed function from function dropdown as this function does not exist in InfluxDB 0.9 2015-08-11 21:10:15 +02:00
Matt
a912323d01 Merge pull request #8 from mattttt/ct-additions
Added patch from ct
2015-08-11 14:59:59 -04:00
Matt Toback
511a592ec8 Added patch from ct 2015-08-11 14:58:30 -04:00
Matt
d132d3abfd Merge pull request #7 from mattttt/mattttt-docs
Mattttt docs
2015-08-11 14:41:21 -04:00
Trent White
fb9b5e4274 Merge pull request #6 from mattttt/trent-docs-update
openTSDB image was broken
2015-08-11 14:32:03 -04:00
Trent White
8dcc5645db openTSDB image was broken 2015-08-11 14:26:32 -04:00
raj dutt
c6798892b8 Update screencasts.md
tweaks
2015-08-11 14:26:04 -04:00
Matt Toback
eea5b026ce Sanity checked and verified for merge 2015-08-11 14:17:56 -04:00
raj dutt
331e858ed3 Update gettingstarted.md 2015-08-11 14:04:14 -04:00
Trent White
3d602a2558 Merge remote-tracking branch 'origin/master' 2015-08-11 14:01:03 -04:00
Trent White
14386a2f62 Merge branch 'trent-docs'
Conflicts:
	docs/sources/reference/templating.md
2015-08-11 13:59:46 -04:00
raj dutt
4e91e05470 Update gettingstarted.md 2015-08-11 13:58:44 -04:00
Trent White
ea0bec8bf2 update content on annotations 2015-08-11 13:51:27 -04:00
raj dutt
c2e8a1d733 Update singlestat.md 2015-08-11 13:49:24 -04:00
raj dutt
c6155723a1 Update playlist.md 2015-08-11 13:43:36 -04:00
Trent White
8c64afc81e update to templating doc 2015-08-11 13:42:40 -04:00
raj dutt
eafd1d8c53 Update templating.md 2015-08-11 13:33:01 -04:00
raj dutt
df0673f907 Update templating.md 2015-08-11 13:31:49 -04:00
Torkel Ödegaard
fe093c6385 feat(timepicker): added new relative time option, will set time range from midnight to now, closes #1186 2015-08-11 19:16:45 +02:00
raj dutt
ed536f18ee Update basic_concepts.md 2015-08-11 13:07:30 -04:00
Matt Toback
96590f591c Updates to dashlist article 2015-08-11 12:34:18 -04:00
Trent White
32bfcbcdbc updates to screencasts page, with descriptions of each 2015-08-11 11:43:45 -04:00
Torkel Ödegaard
82feeff3aa tech(emails): small update to grunt watch for emails 2015-08-11 16:40:23 +02:00
Trent White
3af451caeb Merge pull request #4 from mattttt/trent-docs
added pages, touched up some images and content  in data-sources section
2015-08-11 10:15:59 -04:00
Trent White
ae953f2420 added pages, touched up some images and content in data-sources section 2015-08-11 10:12:58 -04:00
Torkel Ödegaard
43ef9f909a feat(admin): admin page for all grafana organizations (list / edit view), #2457 2015-08-11 15:20:50 +02:00
Mitsuhiro Tanda
4f42dae3cb change behavior of dimension value suggestion 2015-08-11 20:43:34 +09:00
Torkel Ödegaard
e01c68dcea fix(logging): removed db connection string from being printed in logs at app startup, fixes #2488 2015-08-11 11:26:28 +02:00
Torkel Ödegaard
234d1291f9 Merge branch 'invite'
Conflicts:
	public/css/less/gfbox.less
	public/emails/reset_password.html
	public/emails/welcome_on_signup.html
2015-08-11 11:22:43 +02:00
Torkel Ödegaard
90169d6a05 Bumped master version to 2.2.0-pre1 2015-08-11 11:18:39 +02:00
Torkel Ödegaard
809562c874 updated install docs for 2.1.1 release 2015-08-11 11:12:06 +02:00
Torkel Ödegaard
1ea0b5371a feat(invite): new user invites are now also added to correct org after sign up is completed, #2353 2015-08-11 10:45:03 +02:00
Torkel Ödegaard
74932e6311 feat(invite): added specific email for invites to existing grafana users 2015-08-11 10:35:10 +02:00
Torkel Ödegaard
b0b96aa410 Moved reset password and welcome on sign up email to new email build template system 2015-08-11 10:01:52 +02:00
Mitsuhiro Tanda
deedd166db add metricFindQuery 2015-08-11 14:26:24 +09:00
Mitsuhiro Tanda
6ffab821b2 refactor suggest function 2015-08-11 14:26:18 +09:00
Matt
e53c8100a3 Merge pull request #3 from mattttt/mattttt-docs
Adding additional pages and updates from google doc
2015-08-10 23:20:15 -04:00
Mitsuhiro Tanda
6705902a94 fix style 2015-08-11 10:17:48 +09:00
Mitsuhiro Tanda
fd96e30c9c add region field in query editor 2015-08-11 10:17:39 +09:00
Matt Toback
2b59724ea8 Adding additional pages and updates from google doc 2015-08-10 18:19:56 -04:00
Trent White
aa4d6d6f0f Merge pull request #2 from mattttt/trent-docs
add basic concepts doc
2015-08-10 17:52:38 -04:00
Trent White
ca65671553 add basic concepts doc 2015-08-10 17:51:29 -04:00
Trent White
2d99308219 Merge pull request #1 from mattttt/trent-docs
adding Raj edits/additions as well as initial round of pages I created
2015-08-10 17:46:51 -04:00
Trent White
a4be75f410 adding Raj edits/additions as well as initial round of pages I created 2015-08-10 17:44:08 -04:00
Torkel Ödegaard
4439545428 feat(invite): lots of work completing the new email template system and css inlining, converted new_user_invite.html to new system, #2353 2015-08-10 21:24:47 +02:00
Torkel Ödegaard
dfd1bff389 feat(invite): began work on email template build system, and css inlining 2015-08-10 17:50:02 +02:00
Torkel Ödegaard
e53c1e39d3 feat(invite): can now add org user with username again, #2353 2015-08-10 14:03:08 +02:00
Torkel Ödegaard
775e044e69 feat(invite): progress on invite feature, #2353 2015-08-10 13:47:06 +02:00
Mitsuhiro Tanda
6a697eed2f fix deactivating statistics bug 2015-08-06 14:41:06 +09:00
Mitsuhiro Tanda
134819cb45 fix jshint error 2015-08-06 00:05:40 +09:00
Mitsuhiro Tanda
88ce05976e add CloudWatch datasource 2015-08-05 23:10:32 +09:00
Torkel Ödegaard
93b8287d23 Merge pull request #2437 from mattttt/invite
Added some small styling tweaks to the modals, and adjusted some copy.
2015-08-05 10:10:57 +02:00
Matt Toback
b64c550989 Added some small styling tweaks to the modals, and adjusted some copy. 2015-08-04 15:58:07 -04:00
Torkel Ödegaard
9d25d2674b fix(invite): fixed link in email 2015-08-04 20:23:04 +02:00
Torkel Ödegaard
5a160f426e feat(invite): trying to get username to work as well 2015-07-29 09:30:23 +02:00
Torkel Ödegaard
6d6af09296 feat(invite): handling of existing org user case when inviting, #2353 2015-07-21 12:18:11 +02:00
Torkel Ödegaard
ab54971763 feat(invite): more progress on completing invite form and actually creating a real user, #2353 2015-07-20 17:46:48 +02:00
Torkel Ödegaard
d75f96fdd5 feat(invite): more progress on invited / signup view, #2353 2015-07-20 15:52:49 +02:00
Torkel Ödegaard
024c112944 feat(invite): redesign for pending invite list, added revoke button and link, copy invite also works now, #2353 2015-07-20 14:26:49 +02:00
Torkel Ödegaard
3242354a4b feat(invite): worked on pending invitations list, revoke invite now works, #2353 2015-07-20 10:57:39 +02:00
Torkel Ödegaard
4ac652b127 feat(invite): began work on invited signup view, also added backdrop to login view, #2353 2015-07-19 12:34:03 +02:00
Torkel Ödegaard
6088f83408 feat(invite): initial pass on sending new user invite email, #2353 2015-07-18 17:39:12 +02:00
Torkel Ödegaard
e92f2ecea1 feat(invite): existing grafana users now result in new org user directly, no temp user is created, #2353 2015-07-18 11:43:34 +02:00
Torkel Ödegaard
a82aa8203b Merge branch 'master' into invite 2015-07-18 10:14:45 +02:00
Torkel Ödegaard
2c7e807738 feat(invite): small style change 2015-07-17 17:22:42 +02:00
Torkel Ödegaard
2724cf5db8 feat(invite): small progress 2015-07-17 14:42:49 +02:00
Torkel Ödegaard
0ffcce1b5d feat(invite): more work on invite, basic creation works, added new tab directive from angular-ui and made new tab style, #2353 2015-07-17 09:51:34 +02:00
Torkel Ödegaard
444807c35b feat(invite): worked on db & domain model for temp users, #2353 2015-07-16 17:59:11 +02:00
Torkel Ödegaard
c3a5822a40 feat(user invite): progress on user invite feature, #2353 2015-07-16 12:38:55 +02:00
Torkel Ödegaard
57c78bc603 feat(invite): began work on invite users dialog, #2353 2015-07-16 10:44:55 +02:00
Mike Kobyakov
f76374cd8f fix testDatasource, which was calling performSuggestQuery instead of _performSuggestQuery 2015-07-15 12:03:22 -07:00
Mike Kobyakov
eb88a53223 fix for a change in datasource object 2015-07-10 16:09:08 -07:00
Mike Kobyakov
76c18e50a4 Merge remote-tracking branch 'upstream/master'
Conflicts:
	public/app/plugins/datasource/opentsdb/datasource.js
2015-07-10 15:55:45 -07:00
Torkel Ödegaard
2dbb370955 Working on resize handle, drag to resize panels & rows 2015-06-26 18:45:23 +02:00
Mike Kobyakov
0b05f88543 Merge remote-tracking branch 'mychanges/master' 2015-04-22 12:06:32 -07:00
Mike Kobyakov
e395211654 Instead of hard-coding the OpenTsdb aggregators list, pull the supported
aggregators from the datasource directly.
2015-04-22 12:02:45 -07:00
Mike Kobyakov
b01b121a4b Instead of hard-coding the OpenTsdb aggregators list, pull the supported
aggregators from the datasource directly.
2015-03-30 10:17:45 -07:00
1406 changed files with 117494 additions and 10976 deletions

View File

@@ -9,7 +9,7 @@ watch_dirs = [
"$WORKDIR/public/views",
"$WORKDIR/conf",
]
watch_exts = [".go", "conf/*"]
watch_exts = [".go", ".ini", ".toml", ".html"]
build_delay = 1500
cmds = [
["go", "build", "-o", "./bin/grafana-server"],

12
.editorconfig Normal file
View File

@@ -0,0 +1,12 @@
# http://editorconfig.org
root = true
[*]
indent_style = space
indent_size = 2
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.md]
trim_trailing_whitespace = false

7
.gitignore vendored
View File

@@ -1,9 +1,13 @@
node_modules
npm-debug.log
coverage/
.aws-config.json
awsconfig
/dist
/emails/dist
/public_gen
/tmp
vendor/phantomjs/phantomjs
docs/AWS_S3_BUCKET
docs/GIT_BRANCH
@@ -27,4 +31,5 @@ public/css/*.min.css
conf/custom.ini
fig.yml
profile.cov
grafana
.notouch

View File

@@ -23,7 +23,7 @@
"laxcomma": true,
"sub": true,
"unused": true,
"maxdepth": 5,
"maxdepth": 6,
"maxlen": 140,
"globals": {
@@ -32,4 +32,4 @@
"Chromath": false,
"setImmediate": true
}
}
}

View File

@@ -1,4 +1,100 @@
# 2.1.1 (unreleased)
# 2.6.0 (unreleased)
### New Table Panel
* **table**: New powerful and flexible table panel, closes [#215](https://github.com/grafana/grafana/issues/215)
### Enhancements
* **CloudWatch**: Support for multiple AWS Credentials, closes [#3053](https://github.com/grafana/grafana/issues/3053), [#3080](https://github.com/grafana/grafana/issues/3080)
* **Elasticsearch**: Support for dynamic daily indices for annotations, closes [#3061](https://github.com/grafana/grafana/issues/3061)
* **Elasticsearch**: Support for setting min_doc_count for date histogram, closes [#3416](https://github.com/grafana/grafana/issues/3416)
* **Graph Panel**: Option to hide series with all zeroes from legend and tooltip, closes [#1381](https://github.com/grafana/grafana/issues/1381), [#3336](https://github.com/grafana/grafana/issues/3336)
### Bug Fixes
* **cloudwatch**: fix for handling of period for long time ranges, fixes [#3086](https://github.com/grafana/grafana/issues/3086)
* **dashboard**: fix for collapse row by clicking on row title, fixes [#3065](https://github.com/grafana/grafana/issues/3065)
* **influxdb**: fix for relative time ranges `last x months` and `last x years`, fixes [#3067](https://github.com/grafana/grafana/issues/3067)
* **graph**: layout fix for color picker when right side legend was enabled, fixes [#3093](https://github.com/grafana/grafana/issues/3093)
* **elasticsearch**: disabling elastic query (via eye) caused error, fixes [#3300](https://github.com/grafana/grafana/issues/3300)
### Breaking changes
* **elasticsearch**: Manually edited JSON queries are no longer supported (they barely worked in 2.5)
# 2.5 (2015-10-28)
**New Feature: Mix data sources**
- A built-in data source named `-- Mixed --` is now available. When picked in the metrics tab,
it allows you to add queries of different data source types & instances to the same graph/panel!
[Issue #436](https://github.com/grafana/grafana/issues/436)
**New Feature: Elasticsearch Metrics Query Editor and Viz Support**
- Feature-rich query editor and processing features enable you to issue all kinds of metric queries to Elasticsearch
- See [Issue #1034](https://github.com/grafana/grafana/issues/1034) for more info.
**New Feature: New and much improved time picker**
- Support for quick ranges like `Today`, `This day last week`, `This week`, `The day so far`, etc.
- Improved UI and improved support for UTC, [Issue #2761](https://github.com/grafana/grafana/issues/2761) for more info.
**User Onboarding**
- Org admin can now send email invites (or invite links) to people who are not yet Grafana users
- Sign up flow now supports email verification (if enabled)
- See [Issue #2353](https://github.com/grafana/grafana/issues/2353) for more info.
**Other new Features & Enhancements**
- [Pull #2720](https://github.com/grafana/grafana/pull/2720). Admin: Initial basic quota support (per Org)
- [Issue #2577](https://github.com/grafana/grafana/issues/2577). Panel: Resize handles in panel bottom right corners for easy width and height change
- [Issue #2457](https://github.com/grafana/grafana/issues/2457). Admin: admin page for all grafana organizations (list / edit view)
- [Issue #1186](https://github.com/grafana/grafana/issues/1186). Time Picker: New option `today`, will set time range from midnight to now
- [Issue #2647](https://github.com/grafana/grafana/issues/2647). InfluxDB: You can now set group by time interval on each query
- [Issue #2599](https://github.com/grafana/grafana/issues/2599). InfluxDB: Improved alias support, you can now use the `AS` clause for each select statement
- [Issue #2708](https://github.com/grafana/grafana/issues/2708). InfluxDB: You can now set math expression for select clauses.
- [Issue #1575](https://github.com/grafana/grafana/issues/1575). Drilldown link: now you can click on the external link icon in the panel header to access drilldown links!
- [Issue #1646](https://github.com/grafana/grafana/issues/1646). OpenTSDB: Fetch list of aggregators from OpenTSDB
- [Issue #2955](https://github.com/grafana/grafana/issues/2955). Graph: More axis units (Length, Volume, Temperature, Pressure, etc), thanks @greglook
- [Issue #2928](https://github.com/grafana/grafana/issues/2928). LDAP: Support for searching for group memberships, i.e. POSIX (no memberOf) schemas, also multiple ldap servers, and root ca cert, thanks @abligh
**Fixes**
- [Issue #2413](https://github.com/grafana/grafana/issues/2413). InfluxDB 0.9: Fix for handling empty series object in response from influxdb
- [Issue #2574](https://github.com/grafana/grafana/issues/2574). Snapshot: Fix for the snapshot 7-day expire option; it actually expired after 7 hours
- [Issue #2568](https://github.com/grafana/grafana/issues/2568). AuthProxy: Fix for server side rendering of panel when using auth proxy
- [Issue #2490](https://github.com/grafana/grafana/issues/2490). Graphite: Dashboard import was broken in 2.1 and 2.1.1, working now
- [Issue #2565](https://github.com/grafana/grafana/issues/2565). TimePicker: Fix for applying a custom time range not refreshing the dashboard
- [Issue #2563](https://github.com/grafana/grafana/issues/2563). Annotations: Fixed issue where the html sanitizer fails on the annotation title or body; it now falls back to html-escaping the title and text
- [Issue #2564](https://github.com/grafana/grafana/issues/2564). Templating: Another attempt at fixing #2534 (Init multi value template var used in repeat panel from url)
- [Issue #2620](https://github.com/grafana/grafana/issues/2620). Graph: multi series tooltip did not highlight the correct point when stacking was enabled and series were of different resolution
- [Issue #2636](https://github.com/grafana/grafana/issues/2636). InfluxDB: Do not show template vars in dropdown for tag keys and group by keys
- [Issue #2604](https://github.com/grafana/grafana/issues/2604). InfluxDB: More alias options, can now use `$[0-9]` syntax to reference part of a measurement name (separated by dots)
**Breaking Changes**
- Notice to makers/users of custom data sources: there is a minor breaking change in 2.2 that
requires an update to custom data sources for them to work in 2.2. [Read this doc](https://github.com/grafana/grafana/tree/master/docs/sources/datasources/plugin_api.md) for more on the
data source api change.
- Data source api changes, [PLUGIN_CHANGES.md](https://github.com/grafana/grafana/blob/master/public/app/plugins/PLUGIN_CHANGES.md)
- The duplicate query function used in data source editors is changed, and moveMetricQuery function was renamed
**Tech (Note for devs)**
Started using Typescript (transpiled to ES5). Uncompiled typescript and less files live in the public folder (in the source tree);
this folder is never modified by build steps. Compiled css and javascript files are put in public_gen, and all other files
that do not undergo transformation are simply copied from public to public_gen. It is public_gen that is used by grafana-server
if it is found.
Grunt & Watch tasks:
- `grunt`: default task, will remove public_gen, copy over all files from public, and do less & typescript compilation
- `grunt watch`: will watch for changes to less and typescript files and compile them to public_gen; other files are just copied to public_gen
# 2.1.3 (2015-08-24)
**Fixes**
- [Issue #2580](https://github.com/grafana/grafana/issues/2580). Packaging: ldap.toml was not marked as config file and could be overwritten in upgrade
- [Issue #2564](https://github.com/grafana/grafana/issues/2564). Templating: Another attempt at fixing #2534 (Init multi value template var used in repeat panel from url)
# 2.1.2 (2015-08-20)
**Fixes**
- [Issue #2558](https://github.com/grafana/grafana/issues/2558). DragDrop: Fix for broken drag drop behavior
- [Issue #2534](https://github.com/grafana/grafana/issues/2534). Templating: fix for setting template variable value via url and having repeated panels or rows
# 2.1.1 (2015-08-11)
**Fixes**
- [Issue #2443](https://github.com/grafana/grafana/issues/2443). Templating: Fix for buggy repeat row behavior when combined with repeat panel due to a recent change before the 2.1 release
@@ -118,6 +214,10 @@
# 2.0.0-Beta1 (2015-03-30)
**Important Note**
Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated backend server. Please read the [Documentation](http://docs.grafana.org) for more details about this SIGNIFICANT change to Grafana
**New features**
- [Issue #1623](https://github.com/grafana/grafana/issues/1623). Share Dashboard: Dashboard snapshot sharing (dashboard and data snapshot); save locally or to the public snapshot site snapshots.raintank.io
- [Issue #1622](https://github.com/grafana/grafana/issues/1622). Share Panel: The share modal now has an embed option that gives you an iframe you can use to embed a single graph on another web site

62
Godeps/Godeps.json generated
View File

@@ -1,6 +1,6 @@
{
"ImportPath": "github.com/grafana/grafana",
"GoVersion": "go1.4.2",
"GoVersion": "go1.5",
"Packages": [
"./pkg/..."
],
@@ -18,10 +18,65 @@
"ImportPath": "github.com/Unknwon/macaron",
"Rev": "93de4f3fad97bf246b838f828e2348f46f21f20a"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v0.10.4-18-gce51895",
"Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
"Comment": "v0.10.4-18-gce51895",
"Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
"Comment": "v0.10.4-18-gce51895",
"Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
"Comment": "v0.10.4-18-gce51895",
"Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
"Comment": "v0.10.4-18-gce51895",
"Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"Comment": "v0.10.4-18-gce51895",
"Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
"Comment": "v0.10.4-18-gce51895",
"Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
"Comment": "v0.10.4-18-gce51895",
"Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch",
"Comment": "v0.10.4-18-gce51895",
"Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
"Comment": "v0.10.4-18-gce51895",
"Rev": "ce51895e994693d65ab997ae48032bf13a9290b7"
},
{
"ImportPath": "github.com/davecgh/go-spew/spew",
"Rev": "2df174808ee097f90d259e432cc04442cf60be21"
},
{
"ImportPath": "github.com/go-ini/ini",
"Comment": "v0-48-g060d7da",
"Rev": "060d7da055ba6ec5ea7a31f116332fe5efa04ce0"
},
{
"ImportPath": "github.com/go-ldap/ldap",
"Comment": "v1-19-g83e6542",
@@ -45,6 +100,11 @@
"ImportPath": "github.com/gosimple/slug",
"Rev": "8d258463b4459f161f51d6a357edacd3eef9d663"
},
{
"ImportPath": "github.com/jmespath/go-jmespath",
"Comment": "0.2.2",
"Rev": "3433f3ea46d9f8019119e7dd41274e112a2359a9"
},
{
"ImportPath": "github.com/jtolds/gls",
"Rev": "f1ac7f4f24f50328e6bc838ca4437d1612a0243c"

View File

@@ -0,0 +1,105 @@
// Package awserr represents API error interface accessors for the SDK.
package awserr
// An Error wraps lower level errors with code, message and an original error.
// The underlying concrete error type may also satisfy other interfaces which
// can be used to obtain more specific information about the error.
//
// Calling Error() or String() will always include the full information about
// an error based on its underlying type.
//
// Example:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if awsErr, ok := err.(awserr.Error); ok {
// // Get error details
// log.Println("Error:", err.Code(), err.Message())
//
// // Prints out full error message, including original error if there was one.
// log.Println("Error:", err.Error())
//
// // Get original error
// if origErr := err.Err(); origErr != nil {
// // operate on original error.
// }
// } else {
// fmt.Println(err.Error())
// }
// }
//
type Error interface {
// Satisfy the generic error interface.
error
// Returns the short phrase depicting the classification of the error.
Code() string
// Returns the error details message.
Message() string
// Returns the original error if one was set. Nil is returned if not set.
OrigErr() error
}
// New returns an Error object described by the code, message, and origErr.
//
// If origErr satisfies the Error interface it will not be wrapped within a new
// Error object and will instead be returned.
func New(code, message string, origErr error) Error {
if e, ok := origErr.(Error); ok && e != nil {
return e
}
return newBaseError(code, message, origErr)
}
// A RequestFailure is an interface to extract request failure information from
// an Error such as the request ID of the failed request returned by a service.
// RequestFailures may not always have a requestID value if the request failed
// prior to reaching the service such as a connection error.
//
// Example:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if reqerr, ok := err.(RequestFailure); ok {
// log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
// } else {
// log.Printf("Error:", err.Error()
// }
// }
//
// Combined with awserr.Error:
//
// output, err := s3manage.Upload(svc, input, opts)
// if err != nil {
// if awsErr, ok := err.(awserr.Error); ok {
// // Generic AWS Error with Code, Message, and original error (if any)
// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
//
// if reqErr, ok := err.(awserr.RequestFailure); ok {
// // A service error occurred
// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
// }
// } else {
// fmt.Println(err.Error())
// }
// }
//
type RequestFailure interface {
Error
// The status code of the HTTP response.
StatusCode() int
// The request ID returned by the service for a request failure. This will
// be empty if no request ID is available such as the request failed due
// to a connection error.
RequestID() string
}
// NewRequestFailure returns a new request error wrapper for the given Error
// provided.
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
return newRequestError(err, statusCode, reqID)
}
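
A minimal sketch of how the accessors above compose; the error code, message, status code, and request id values are illustrative only.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap a plain error with a classification code and a message.
	var err error = awserr.New("RequestError", "send request failed",
		fmt.Errorf("connection reset"))

	// Callers type-assert back to awserr.Error to read the structured parts.
	if aerr, ok := err.(awserr.Error); ok {
		fmt.Println(aerr.Code())    // RequestError
		fmt.Println(aerr.Message()) // send request failed
		fmt.Println(aerr.OrigErr()) // connection reset
	}

	// NewRequestFailure layers HTTP status and request id on top of an Error.
	reqErr := awserr.NewRequestFailure(
		awserr.New("Throttling", "rate exceeded", nil), 400, "req-123")
	fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) // 400 req-123
}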

View File

@@ -0,0 +1,135 @@
package awserr
import "fmt"
// SprintError returns a string of the formatted error code.
//
// Both extra and origErr are optional. If they are included their lines
// will be added, but if they are not included their lines will be ignored.
func SprintError(code, message, extra string, origErr error) string {
msg := fmt.Sprintf("%s: %s", code, message)
if extra != "" {
msg = fmt.Sprintf("%s\n\t%s", msg, extra)
}
if origErr != nil {
msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
}
return msg
}
// A baseError wraps the code and message which defines an error. It also
// can be used to wrap an original error object.
//
// Should be used as the root for errors satisfying the awserr.Error. Also
// for any error which does not fit into a specific error wrapper type.
type baseError struct {
// Classification of error
code string
// Detailed information about error
message string
// Optional original error this error is based off of. Allows building
// chained errors.
origErr error
}
// newBaseError returns an error object for the code, message, and err.
//
// code is a short no whitespace phrase depicting the classification of
// the error that is being created.
//
// message is the free flow string containing detailed information about the error.
//
// origErr is the error object which will be nested under the new error to be returned.
func newBaseError(code, message string, origErr error) *baseError {
return &baseError{
code: code,
message: message,
origErr: origErr,
}
}
// Error returns the string representation of the error.
//
// See ErrorWithExtra for formatting.
//
// Satisfies the error interface.
func (b baseError) Error() string {
return SprintError(b.code, b.message, "", b.origErr)
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (b baseError) String() string {
return b.Error()
}
// Code returns the short phrase depicting the classification of the error.
func (b baseError) Code() string {
return b.code
}
// Message returns the error details message.
func (b baseError) Message() string {
return b.message
}
// OrigErr returns the original error if one was set. Nil is returned if no error
// was set.
func (b baseError) OrigErr() error {
return b.origErr
}
// So that the Error interface type can be included as an anonymous field
// in the requestError struct and not conflict with the error.Error() method.
type awsError Error
// A requestError wraps a request or service error.
//
// Composed of baseError for code, message, and original error.
type requestError struct {
awsError
statusCode int
requestID string
}
// newRequestError returns a wrapped error with additional information for request
// status code, and service requestID.
//
// Should be used to wrap all request which involve service requests. Even if
// the request failed without a service response, but had an HTTP status code
// that may be meaningful.
//
// Also wraps original errors via the baseError.
func newRequestError(err Error, statusCode int, requestID string) *requestError {
return &requestError{
awsError: err,
statusCode: statusCode,
requestID: requestID,
}
}
// Error returns the string representation of the error.
// Satisfies the error interface.
func (r requestError) Error() string {
extra := fmt.Sprintf("status code: %d, request id: %s",
r.statusCode, r.requestID)
return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (r requestError) String() string {
return r.Error()
}
// StatusCode returns the wrapped status code for the error
func (r requestError) StatusCode() int {
return r.statusCode
}
// RequestID returns the wrapped requestID
func (r requestError) RequestID() string {
return r.requestID
}
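
SprintError is the formatter shared by baseError and requestError above; a short sketch with assumed values shows the layout it produces.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// code, message, optional extra detail, optional wrapped error.
	out := awserr.SprintError("Throttling", "rate exceeded",
		"status code: 400, request id: req-123",
		errors.New("connection reset"))
	fmt.Println(out)
	// Throttling: rate exceeded
	//	status code: 400, request id: req-123
	// caused by: connection reset
}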

View File

@@ -0,0 +1,100 @@
package awsutil
import (
"io"
"reflect"
)
// Copy deeply copies a src structure to dst. Useful for copying request and
// response structures.
//
// Can copy between structs of different type, but will only copy fields which
// are assignable, and exist in both structs. Fields which are not assignable,
// or do not exist in both structs are ignored.
func Copy(dst, src interface{}) {
dstval := reflect.ValueOf(dst)
if !dstval.IsValid() {
panic("Copy dst cannot be nil")
}
rcopy(dstval, reflect.ValueOf(src), true)
}
// CopyOf returns a copy of src while also allocating the memory for dst.
// src must be a pointer type or this operation will fail.
func CopyOf(src interface{}) (dst interface{}) {
dsti := reflect.New(reflect.TypeOf(src).Elem())
dst = dsti.Interface()
rcopy(dsti, reflect.ValueOf(src), true)
return
}
// rcopy performs a recursive copy of values from the source to destination.
//
// root is used to skip certain aspects of the copy which are not valid
// for the root node of a object.
func rcopy(dst, src reflect.Value, root bool) {
if !src.IsValid() {
return
}
switch src.Kind() {
case reflect.Ptr:
if _, ok := src.Interface().(io.Reader); ok {
if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
dst.Elem().Set(src)
} else if dst.CanSet() {
dst.Set(src)
}
} else {
e := src.Type().Elem()
if dst.CanSet() && !src.IsNil() {
dst.Set(reflect.New(e))
}
if src.Elem().IsValid() {
// Keep the current root state since the depth hasn't changed
rcopy(dst.Elem(), src.Elem(), root)
}
}
case reflect.Struct:
t := dst.Type()
for i := 0; i < t.NumField(); i++ {
name := t.Field(i).Name
srcVal := src.FieldByName(name)
dstVal := dst.FieldByName(name)
if srcVal.IsValid() && dstVal.CanSet() {
rcopy(dstVal, srcVal, false)
}
}
case reflect.Slice:
if src.IsNil() {
break
}
s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
dst.Set(s)
for i := 0; i < src.Len(); i++ {
rcopy(dst.Index(i), src.Index(i), false)
}
case reflect.Map:
if src.IsNil() {
break
}
s := reflect.MakeMap(src.Type())
dst.Set(s)
for _, k := range src.MapKeys() {
v := src.MapIndex(k)
v2 := reflect.New(v.Type()).Elem()
rcopy(v2, v, false)
dst.SetMapIndex(k, v2)
}
default:
// Assign the value if possible. If its not assignable, the value would
// need to be converted and the impact of that may be unexpected, or is
// not compatible with the dst type.
if src.Type().AssignableTo(dst.Type()) {
dst.Set(src)
}
}
}

View File

@@ -0,0 +1,233 @@
package awsutil_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"testing"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/stretchr/testify/assert"
)
func ExampleCopy() {
type Foo struct {
A int
B []*string
}
// Create the initial value
str1 := "hello"
str2 := "bye bye"
f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
// Do the copy
var f2 Foo
awsutil.Copy(&f2, f1)
// Print the result
fmt.Println(awsutil.Prettify(f2))
// Output:
// {
// A: 1,
// B: ["hello","bye bye"]
// }
}
func TestCopy(t *testing.T) {
type Foo struct {
A int
B []*string
C map[string]*int
}
// Create the initial value
str1 := "hello"
str2 := "bye bye"
int1 := 1
int2 := 2
f1 := &Foo{
A: 1,
B: []*string{&str1, &str2},
C: map[string]*int{
"A": &int1,
"B": &int2,
},
}
// Do the copy
var f2 Foo
awsutil.Copy(&f2, f1)
// Values are equal
assert.Equal(t, f2.A, f1.A)
assert.Equal(t, f2.B, f1.B)
assert.Equal(t, f2.C, f1.C)
// But pointers are not!
str3 := "nothello"
int3 := 57
f2.A = 100
f2.B[0] = &str3
f2.C["B"] = &int3
assert.NotEqual(t, f2.A, f1.A)
assert.NotEqual(t, f2.B, f1.B)
assert.NotEqual(t, f2.C, f1.C)
}
func TestCopyNestedWithUnexported(t *testing.T) {
type Bar struct {
a int
B int
}
type Foo struct {
A string
B Bar
}
f1 := &Foo{A: "string", B: Bar{a: 1, B: 2}}
var f2 Foo
awsutil.Copy(&f2, f1)
// Values match
assert.Equal(t, f2.A, f1.A)
assert.NotEqual(t, f2.B, f1.B)
assert.NotEqual(t, f2.B.a, f1.B.a)
assert.Equal(t, f2.B.B, f2.B.B)
}
func TestCopyIgnoreNilMembers(t *testing.T) {
type Foo struct {
A *string
B []string
C map[string]string
}
f := &Foo{}
assert.Nil(t, f.A)
assert.Nil(t, f.B)
assert.Nil(t, f.C)
var f2 Foo
awsutil.Copy(&f2, f)
assert.Nil(t, f2.A)
assert.Nil(t, f2.B)
assert.Nil(t, f2.C)
fcopy := awsutil.CopyOf(f)
f3 := fcopy.(*Foo)
assert.Nil(t, f3.A)
assert.Nil(t, f3.B)
assert.Nil(t, f3.C)
}
func TestCopyPrimitive(t *testing.T) {
str := "hello"
var s string
awsutil.Copy(&s, &str)
assert.Equal(t, "hello", s)
}
func TestCopyNil(t *testing.T) {
var s string
awsutil.Copy(&s, nil)
assert.Equal(t, "", s)
}
func TestCopyReader(t *testing.T) {
var buf io.Reader = bytes.NewReader([]byte("hello world"))
var r io.Reader
awsutil.Copy(&r, buf)
b, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, []byte("hello world"), b)
// empty bytes because this is not a deep copy
b, err = ioutil.ReadAll(buf)
assert.NoError(t, err)
assert.Equal(t, []byte(""), b)
}
func TestCopyDifferentStructs(t *testing.T) {
type SrcFoo struct {
A int
B []*string
C map[string]*int
SrcUnique string
SameNameDiffType int
unexportedPtr *int
ExportedPtr *int
}
type DstFoo struct {
A int
B []*string
C map[string]*int
DstUnique int
SameNameDiffType string
unexportedPtr *int
ExportedPtr *int
}
// Create the initial value
str1 := "hello"
str2 := "bye bye"
int1 := 1
int2 := 2
f1 := &SrcFoo{
A: 1,
B: []*string{&str1, &str2},
C: map[string]*int{
"A": &int1,
"B": &int2,
},
SrcUnique: "unique",
SameNameDiffType: 1,
unexportedPtr: &int1,
ExportedPtr: &int2,
}
// Do the copy
var f2 DstFoo
awsutil.Copy(&f2, f1)
// Values are equal
assert.Equal(t, f2.A, f1.A)
assert.Equal(t, f2.B, f1.B)
assert.Equal(t, f2.C, f1.C)
assert.Equal(t, "unique", f1.SrcUnique)
assert.Equal(t, 1, f1.SameNameDiffType)
assert.Equal(t, 0, f2.DstUnique)
assert.Equal(t, "", f2.SameNameDiffType)
assert.Equal(t, int1, *f1.unexportedPtr)
assert.Nil(t, f2.unexportedPtr)
assert.Equal(t, int2, *f1.ExportedPtr)
assert.Equal(t, int2, *f2.ExportedPtr)
}
func ExampleCopyOf() {
type Foo struct {
A int
B []*string
}
// Create the initial value
str1 := "hello"
str2 := "bye bye"
f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
// Do the copy
v := awsutil.CopyOf(f1)
var f2 *Foo = v.(*Foo)
// Print the result
fmt.Println(awsutil.Prettify(f2))
// Output:
// {
// A: 1,
// B: ["hello","bye bye"]
// }
}

View File

@@ -0,0 +1,27 @@
package awsutil
import (
"reflect"
)
// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
// In addition to this, this method will also dereference the input values if
// possible so the DeepEqual performed will not fail if one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input parameters.
func DeepEqual(a, b interface{}) bool {
ra := reflect.Indirect(reflect.ValueOf(a))
rb := reflect.Indirect(reflect.ValueOf(b))
if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
// If the elements are both nil and of the same type, they are equal.
// If they are of different types, they are not equal.
return reflect.TypeOf(a) == reflect.TypeOf(b)
} else if raValid != rbValid {
// Both values must be valid to be equal
return false
}
return reflect.DeepEqual(ra.Interface(), rb.Interface())
}

View File

@@ -0,0 +1,29 @@
package awsutil_test
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/stretchr/testify/assert"
)
func TestDeepEqual(t *testing.T) {
cases := []struct {
a, b interface{}
equal bool
}{
{"a", "a", true},
{"a", "b", false},
{"a", aws.String(""), false},
{"a", nil, false},
{"a", aws.String("a"), true},
{(*bool)(nil), (*bool)(nil), true},
{(*bool)(nil), (*string)(nil), false},
{nil, nil, true},
}
for i, c := range cases {
assert.Equal(t, c.equal, awsutil.DeepEqual(c.a, c.b), "%d, a:%v b:%v, %t", i, c.a, c.b, c.equal)
}
}

View File

@@ -0,0 +1,210 @@
package awsutil
import (
"reflect"
"regexp"
"strconv"
"strings"
"github.com/jmespath/go-jmespath"
)
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
// rValuesAtPath returns a slice of values found in value v. The values
// in v are explored recursively so all nested values are collected.
func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) []reflect.Value {
pathparts := strings.Split(path, "||")
if len(pathparts) > 1 {
for _, pathpart := range pathparts {
vals := rValuesAtPath(v, pathpart, create, caseSensitive)
if len(vals) > 0 {
return vals
}
}
return nil
}
values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
components := strings.Split(path, ".")
for len(values) > 0 && len(components) > 0 {
var index *int64
var indexStar bool
c := strings.TrimSpace(components[0])
if c == "" { // no actual component, illegal syntax
return nil
} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
// TODO normalize case for user
return nil // don't support unexported fields
}
// parse this component
if m := indexRe.FindStringSubmatch(c); m != nil {
c = m[1]
if m[2] == "" {
index = nil
indexStar = true
} else {
i, _ := strconv.ParseInt(m[2], 10, 32)
index = &i
indexStar = false
}
}
nextvals := []reflect.Value{}
for _, value := range values {
// pull component name out of struct member
if value.Kind() != reflect.Struct {
continue
}
if c == "*" { // pull all members
for i := 0; i < value.NumField(); i++ {
if f := reflect.Indirect(value.Field(i)); f.IsValid() {
nextvals = append(nextvals, f)
}
}
continue
}
value = value.FieldByNameFunc(func(name string) bool {
if c == name {
return true
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
return true
}
return false
})
if create && value.Kind() == reflect.Ptr && value.IsNil() {
value.Set(reflect.New(value.Type().Elem()))
value = value.Elem()
} else {
value = reflect.Indirect(value)
}
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
if !create && value.IsNil() {
value = reflect.ValueOf(nil)
}
}
if value.IsValid() {
nextvals = append(nextvals, value)
}
}
values = nextvals
if indexStar || index != nil {
nextvals = []reflect.Value{}
for _, value := range values {
value := reflect.Indirect(value)
if value.Kind() != reflect.Slice {
continue
}
if indexStar { // grab all indices
for i := 0; i < value.Len(); i++ {
idx := reflect.Indirect(value.Index(i))
if idx.IsValid() {
nextvals = append(nextvals, idx)
}
}
continue
}
// pull out index
i := int(*index)
if i >= value.Len() { // check out of bounds
if create {
// TODO resize slice
} else {
continue
}
} else if i < 0 { // support negative indexing
i = value.Len() + i
}
value = reflect.Indirect(value.Index(i))
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
if !create && value.IsNil() {
value = reflect.ValueOf(nil)
}
}
if value.IsValid() {
nextvals = append(nextvals, value)
}
}
values = nextvals
}
components = components[1:]
}
return values
}
// ValuesAtPath returns a list of values at the case insensitive lexical
// path inside of a structure.
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
result, err := jmespath.Search(path, i)
if err != nil {
return nil, err
}
v := reflect.ValueOf(result)
if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
return nil, nil
}
if s, ok := result.([]interface{}); ok {
return s, err
}
if v.Kind() == reflect.Map && v.Len() == 0 {
return nil, nil
}
if v.Kind() == reflect.Slice {
out := make([]interface{}, v.Len())
for i := 0; i < v.Len(); i++ {
out[i] = v.Index(i).Interface()
}
return out, nil
}
return []interface{}{result}, nil
}
// SetValueAtPath sets a value at the case insensitive lexical path inside
// of a structure.
func SetValueAtPath(i interface{}, path string, v interface{}) {
if rvals := rValuesAtPath(i, path, true, false); rvals != nil {
for _, rval := range rvals {
setValue(rval, v)
}
}
}
func setValue(dstVal reflect.Value, src interface{}) {
if dstVal.Kind() == reflect.Ptr {
dstVal = reflect.Indirect(dstVal)
}
srcVal := reflect.ValueOf(src)
if !srcVal.IsValid() { // src is literal nil
if dstVal.CanAddr() {
// Convert to pointer so that pointer's value can be nil'ed
// dstVal = dstVal.Addr()
}
dstVal.Set(reflect.Zero(dstVal.Type()))
} else if srcVal.Kind() == reflect.Ptr {
if srcVal.IsNil() {
srcVal = reflect.Zero(dstVal.Type())
} else {
srcVal = reflect.ValueOf(src).Elem()
}
dstVal.Set(srcVal)
} else {
dstVal.Set(srcVal)
}
}

View File

@@ -0,0 +1,108 @@
package awsutil_test
import (
"testing"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/stretchr/testify/assert"
)
type Struct struct {
A []Struct
z []Struct
B *Struct
D *Struct
C string
E map[string]string
}
var data = Struct{
A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}},
C: "initial",
}
var data2 = Struct{A: []Struct{
{A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}},
{A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}},
}}
func TestValueAtPathSuccess(t *testing.T) {
var testCases = []struct {
expect []interface{}
data interface{}
path string
}{
{[]interface{}{"initial"}, data, "C"},
{[]interface{}{"value1"}, data, "A[0].C"},
{[]interface{}{"value2"}, data, "A[1].C"},
{[]interface{}{"value3"}, data, "A[2].C"},
{[]interface{}{"value3"}, data, "a[2].c"},
{[]interface{}{"value3"}, data, "A[-1].C"},
{[]interface{}{"value1", "value2", "value3"}, data, "A[].C"},
{[]interface{}{"terminal"}, data, "B . B . C"},
{[]interface{}{"initial"}, data, "A.D.X || C"},
{[]interface{}{"initial"}, data, "A[0].B || C"},
{[]interface{}{
Struct{A: []Struct{{C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}, {C: "1"}}},
Struct{A: []Struct{{C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}, {C: "2"}}},
}, data2, "A"},
}
for i, c := range testCases {
v, err := awsutil.ValuesAtPath(c.data, c.path)
assert.NoError(t, err, "case %d, expected no error, %s", i, c.path)
assert.Equal(t, c.expect, v, "case %d, %s", i, c.path)
}
}
func TestValueAtPathFailure(t *testing.T) {
var testCases = []struct {
expect []interface{}
errContains string
data interface{}
path string
}{
{nil, "", data, "C.x"},
{nil, "SyntaxError: Invalid token: tDot", data, ".x"},
{nil, "", data, "X.Y.Z"},
{nil, "", data, "A[100].C"},
{nil, "", data, "A[3].C"},
{nil, "", data, "B.B.C.Z"},
{nil, "", data, "z[-1].C"},
{nil, "", nil, "A.B.C"},
{[]interface{}{}, "", Struct{}, "A"},
{nil, "", data, "A[0].B.C"},
{nil, "", data, "D"},
}
for i, c := range testCases {
v, err := awsutil.ValuesAtPath(c.data, c.path)
if c.errContains != "" {
assert.Contains(t, err.Error(), c.errContains, "case %d, expected error, %s", i, c.path)
continue
} else {
assert.NoError(t, err, "case %d, expected no error, %s", i, c.path)
}
assert.Equal(t, c.expect, v, "case %d, %s", i, c.path)
}
}
func TestSetValueAtPathSuccess(t *testing.T) {
var s Struct
awsutil.SetValueAtPath(&s, "C", "test1")
awsutil.SetValueAtPath(&s, "B.B.C", "test2")
awsutil.SetValueAtPath(&s, "B.D.C", "test3")
assert.Equal(t, "test1", s.C)
assert.Equal(t, "test2", s.B.B.C)
assert.Equal(t, "test3", s.B.D.C)
awsutil.SetValueAtPath(&s, "B.*.C", "test0")
assert.Equal(t, "test0", s.B.B.C)
assert.Equal(t, "test0", s.B.D.C)
var s2 Struct
awsutil.SetValueAtPath(&s2, "b.b.c", "test0")
assert.Equal(t, "test0", s2.B.B.C)
awsutil.SetValueAtPath(&s2, "A", []Struct{{}})
assert.Equal(t, []Struct{{}}, s2.A)
}

View File

@@ -0,0 +1,103 @@
package awsutil
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
)
// Prettify returns the string representation of a value.
func Prettify(i interface{}) string {
var buf bytes.Buffer
prettify(reflect.ValueOf(i), 0, &buf)
return buf.String()
}
// prettify will recursively walk value v to build a textual
// representation of the value.
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Struct:
strtype := v.Type().String()
if strtype == "time.Time" {
fmt.Fprintf(buf, "%s", v.Interface())
break
} else if strings.HasPrefix(strtype, "io.") {
buf.WriteString("<buffer>")
break
}
buf.WriteString("{\n")
names := []string{}
for i := 0; i < v.Type().NumField(); i++ {
name := v.Type().Field(i).Name
f := v.Field(i)
if name[0:1] == strings.ToLower(name[0:1]) {
continue // ignore unexported fields
}
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
continue // ignore unset fields
}
names = append(names, name)
}
for i, n := range names {
val := v.FieldByName(n)
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
prettify(val, indent+2, buf)
if i < len(names)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
}
buf.WriteString("[" + nl)
for i := 0; i < v.Len(); i++ {
buf.WriteString(id2)
prettify(v.Index(i), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString("," + nl)
}
}
buf.WriteString(nl + id + "]")
case reflect.Map:
buf.WriteString("{\n")
for i, k := range v.MapKeys() {
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(k.String() + ": ")
prettify(v.MapIndex(k), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
format := "%v"
switch v.Interface().(type) {
case string:
format = "%q"
case io.ReadSeeker, io.Reader:
format = "buffer(%p)"
}
fmt.Fprintf(buf, format, v.Interface())
}
}

View File

@@ -0,0 +1,89 @@
package awsutil
import (
"bytes"
"fmt"
"reflect"
"strings"
)
// StringValue returns the string representation of a value.
func StringValue(i interface{}) string {
var buf bytes.Buffer
stringValue(reflect.ValueOf(i), 0, &buf)
return buf.String()
}
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Struct:
buf.WriteString("{\n")
names := []string{}
for i := 0; i < v.Type().NumField(); i++ {
name := v.Type().Field(i).Name
f := v.Field(i)
if name[0:1] == strings.ToLower(name[0:1]) {
continue // ignore unexported fields
}
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
continue // ignore unset fields
}
names = append(names, name)
}
for i, n := range names {
val := v.FieldByName(n)
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
stringValue(val, indent+2, buf)
if i < len(names)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
}
buf.WriteString("[" + nl)
for i := 0; i < v.Len(); i++ {
buf.WriteString(id2)
stringValue(v.Index(i), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString("," + nl)
}
}
buf.WriteString(nl + id + "]")
case reflect.Map:
buf.WriteString("{\n")
for i, k := range v.MapKeys() {
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(k.String() + ": ")
stringValue(v.MapIndex(k), indent+2, buf)
if i < v.Len()-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
format := "%v"
switch v.Interface().(type) {
case string:
format = "%q"
}
fmt.Fprintf(buf, format, v.Interface())
}
}

View File

@@ -0,0 +1,111 @@
package client
import (
"fmt"
"io/ioutil"
"net/http/httputil"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
)
// A Config provides configuration to a service client instance.
type Config struct {
Config *aws.Config
Handlers request.Handlers
Endpoint, SigningRegion string
}
// ConfigProvider provides a generic way for a service client to receive
// the ClientConfig without circular dependencies.
type ConfigProvider interface {
ClientConfig(serviceName string, cfgs ...*aws.Config) Config
}
// A Client implements the base client request and response handling
// used by all service clients.
type Client struct {
request.Retryer
metadata.ClientInfo
Config aws.Config
Handlers request.Handlers
}
// New will return a pointer to a new initialized service client.
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
svc := &Client{
Config: cfg,
ClientInfo: info,
Handlers: handlers,
}
maxRetries := aws.IntValue(cfg.MaxRetries)
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = 3
}
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
svc.AddDebugHandlers()
for _, option := range options {
option(svc)
}
return svc
}
// NewRequest returns a new Request pointer for the service API
// operation and parameters.
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
}
// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information.
func (c *Client) AddDebugHandlers() {
if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
return
}
c.Handlers.Send.PushFront(logRequest)
c.Handlers.Send.PushBack(logResponse)
}
const logReqMsg = `DEBUG: Request %s/%s Details:
---[ REQUEST POST-SIGN ]-----------------------------
%s
-----------------------------------------------------`
func logRequest(r *request.Request) {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
if logBody {
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
// Body as a NoOpCloser and will not be reset after read by the HTTP
// client reader.
r.Body.Seek(r.BodyStart, 0)
r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
}
r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
}
const logRespMsg = `DEBUG: Response %s/%s Details:
---[ RESPONSE ]--------------------------------------
%s
-----------------------------------------------------`
func logResponse(r *request.Request) {
var msg = "no response data"
if r.HTTPResponse != nil {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
msg = string(dumpedBody)
} else if r.Error != nil {
msg = r.Error.Error()
}
r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
}
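
A hedged sketch of enabling the debug handlers above: logRequest and logResponse are only wired in by AddDebugHandlers when the config's LogLevel is at least LogDebug, and HTTP bodies are dumped only with LogDebugWithHTTPBody. How the resulting Config is handed to a service client constructor is left open here, since constructor signatures are not shown in this diff.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// At LogDebugWithHTTPBody, AddDebugHandlers pushes logRequest/logResponse
	// onto the Send handler list and full request/response bodies are written
	// to the configured Logger.
	cfg := aws.NewConfig().
		WithLogLevel(aws.LogDebugWithHTTPBody).
		WithLogger(aws.NewDefaultLogger())

	_ = cfg // pass this *aws.Config when constructing a service client
}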

View File

@@ -0,0 +1,45 @@
package client
import (
"math"
"math/rand"
"time"
"github.com/aws/aws-sdk-go/aws/request"
)
// DefaultRetryer implements basic retry logic using exponential backoff for
// most services. If you want to implement custom retry logic, implement the
// request.Retryer interface or create a structure type that composes this
// struct and override the specific methods. For example, to override only
// the MaxRetries method:
//
// type retryer struct {
// service.DefaultRetryer
// }
//
// // This implementation always has 100 max retries
// func (d retryer) MaxRetries() uint { return 100 }
type DefaultRetryer struct {
NumMaxRetries int
}
// MaxRetries returns the maximum number of retries the service will use to make
// an individual API request.
func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30)
return time.Duration(delay) * time.Millisecond
}
// ShouldRetry returns if the request should be retried.
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
if r.HTTPResponse.StatusCode >= 500 {
return true
}
return r.IsErrorRetryable()
}
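
Following the composition pattern sketched in the DefaultRetryer doc comment above, a minimal example that raises the retry cap while inheriting the exponential backoff and ShouldRetry logic. The concrete MaxRetries here returns int, matching DefaultRetryer above.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/client"
)

// retryer keeps DefaultRetryer's backoff but always allows 100 attempts.
type retryer struct {
	client.DefaultRetryer
}

func (d retryer) MaxRetries() int { return 100 }

func main() {
	r := retryer{client.DefaultRetryer{NumMaxRetries: 3}}
	// The override wins over the embedded NumMaxRetries field.
	fmt.Println(r.MaxRetries()) // 100
}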

View File

@@ -0,0 +1,12 @@
package metadata
// ClientInfo wraps immutable data from the client.Client structure.
type ClientInfo struct {
ServiceName string
APIVersion string
Endpoint string
SigningName string
SigningRegion string
JSONVersion string
TargetPrefix string
}

View File

@@ -0,0 +1,248 @@
package aws
import (
"net/http"
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
)
// UseServiceDefaultRetries instructs the config to use the service's own default
// number of retries. This will be the default action if Config.MaxRetries
// is nil also.
const UseServiceDefaultRetries = -1
// A Config provides service configuration for service clients. By default,
// all clients will use the {defaults.DefaultConfig} structure.
type Config struct {
// The credentials object to use when signing requests. Defaults to
// a chain of credential providers to search for credentials in environment
// variables, shared credential file, and EC2 Instance Roles.
Credentials *credentials.Credentials
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
// to `""` to use the default generated endpoint.
//
// @note You must still provide a `Region` value when specifying an
// endpoint for a client.
Endpoint *string
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
// AWS Regions and Endpoints
Region *string
// Set this to `true` to disable SSL when sending requests. Defaults
// to `false`.
DisableSSL *bool
// The HTTP client to use when sending requests. Defaults to
// `http.DefaultClient`.
HTTPClient *http.Client
// An integer value representing the logging level. The default log level
// is zero (LogOff), which represents no logging. To enable logging set
// to a LogLevel Value.
LogLevel *LogLevelType
// The logger writer interface to write logging messages to. Defaults to
// standard out.
Logger Logger
// The maximum number of times that a request will be retried for failures.
// Defaults to -1, which defers the max retry setting to the service specific
// configuration.
MaxRetries *int
// Disables semantic parameter validation, which validates input for missing
// required fields and/or other semantic request input errors.
DisableParamValidation *bool
// Disables the computation of request and response checksums, e.g.,
// CRC32 checksums in Amazon DynamoDB.
DisableComputeChecksums *bool
// Set this to `true` to force the request to use path-style addressing,
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
// use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// @note This configuration option is specific to the Amazon S3 service.
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
SleepDelay func(time.Duration)
}
// NewConfig returns a new Config pointer that can be chained with builder methods to
// set multiple configuration values inline without using pointers.
//
// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
//
func NewConfig() *Config {
return &Config{}
}
// WithCredentials sets a config Credentials value returning a Config pointer
// for chaining.
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
c.Credentials = creds
return c
}
// WithEndpoint sets a config Endpoint value returning a Config pointer for
// chaining.
func (c *Config) WithEndpoint(endpoint string) *Config {
c.Endpoint = &endpoint
return c
}
// WithRegion sets a config Region value returning a Config pointer for
// chaining.
func (c *Config) WithRegion(region string) *Config {
c.Region = &region
return c
}
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
// for chaining.
func (c *Config) WithDisableSSL(disable bool) *Config {
c.DisableSSL = &disable
return c
}
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
// for chaining.
func (c *Config) WithHTTPClient(client *http.Client) *Config {
c.HTTPClient = client
return c
}
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
// for chaining.
func (c *Config) WithMaxRetries(max int) *Config {
c.MaxRetries = &max
return c
}
// WithDisableParamValidation sets a config DisableParamValidation value
// returning a Config pointer for chaining.
func (c *Config) WithDisableParamValidation(disable bool) *Config {
c.DisableParamValidation = &disable
return c
}
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
// returning a Config pointer for chaining.
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
c.DisableComputeChecksums = &disable
return c
}
// WithLogLevel sets a config LogLevel value returning a Config pointer for
// chaining.
func (c *Config) WithLogLevel(level LogLevelType) *Config {
c.LogLevel = &level
return c
}
// WithLogger sets a config Logger value returning a Config pointer for
// chaining.
func (c *Config) WithLogger(logger Logger) *Config {
c.Logger = logger
return c
}
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
// pointer for chaining.
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
c.S3ForcePathStyle = &force
return c
}
// WithSleepDelay overrides the function used to sleep while waiting for the
// next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
c.SleepDelay = fn
return c
}
// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
for _, other := range cfgs {
mergeInConfig(c, other)
}
}
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
}
if other.Credentials != nil {
dst.Credentials = other.Credentials
}
if other.Endpoint != nil {
dst.Endpoint = other.Endpoint
}
if other.Region != nil {
dst.Region = other.Region
}
if other.DisableSSL != nil {
dst.DisableSSL = other.DisableSSL
}
if other.HTTPClient != nil {
dst.HTTPClient = other.HTTPClient
}
if other.LogLevel != nil {
dst.LogLevel = other.LogLevel
}
if other.Logger != nil {
dst.Logger = other.Logger
}
if other.MaxRetries != nil {
dst.MaxRetries = other.MaxRetries
}
if other.DisableParamValidation != nil {
dst.DisableParamValidation = other.DisableParamValidation
}
if other.DisableComputeChecksums != nil {
dst.DisableComputeChecksums = other.DisableComputeChecksums
}
if other.S3ForcePathStyle != nil {
dst.S3ForcePathStyle = other.S3ForcePathStyle
}
if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay
}
}
// Copy will return a shallow copy of the Config object. If any additional
// configurations are provided they will be merged into the new config returned.
func (c *Config) Copy(cfgs ...*Config) *Config {
dst := &Config{}
dst.MergeIn(c)
for _, cfg := range cfgs {
dst.MergeIn(cfg)
}
return dst
}
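
A small sketch (region and retry values are illustrative) of the builder, MergeIn, and Copy semantics defined above; MergeIn copies only the fields that are set (non-nil) on the override.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Chained setters avoid taking addresses of literals by hand.
	base := aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)

	// Only Region is set on the override, so MaxRetries survives the merge.
	base.MergeIn(aws.NewConfig().WithRegion("eu-west-1"))
	fmt.Println(aws.StringValue(base.Region))  // eu-west-1
	fmt.Println(aws.IntValue(base.MaxRetries)) // 10

	// Copy returns a fresh Config; mutating it leaves the source untouched.
	cp := base.Copy(aws.NewConfig().WithDisableSSL(true))
	fmt.Println(aws.BoolValue(cp.DisableSSL), aws.BoolValue(base.DisableSSL)) // true false
}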

View File

@@ -0,0 +1,86 @@
package aws
import (
"net/http"
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws/credentials"
)
var testCredentials = credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
var copyTestConfig = Config{
Credentials: testCredentials,
Endpoint: String("CopyTestEndpoint"),
Region: String("COPY_TEST_AWS_REGION"),
DisableSSL: Bool(true),
HTTPClient: http.DefaultClient,
LogLevel: LogLevel(LogDebug),
Logger: NewDefaultLogger(),
MaxRetries: Int(3),
DisableParamValidation: Bool(true),
DisableComputeChecksums: Bool(true),
S3ForcePathStyle: Bool(true),
}
func TestCopy(t *testing.T) {
want := copyTestConfig
got := copyTestConfig.Copy()
if !reflect.DeepEqual(*got, want) {
t.Errorf("Copy() = %+v", got)
t.Errorf(" want %+v", want)
}
got.Region = String("other")
if got.Region == want.Region {
t.Errorf("Expect setting copy values not not reflect in source")
}
}
func TestCopyReturnsNewInstance(t *testing.T) {
want := copyTestConfig
got := copyTestConfig.Copy()
if got == &want {
t.Errorf("Copy() = %p; want different instance as source %p", got, &want)
}
}
var mergeTestZeroValueConfig = Config{}
var mergeTestConfig = Config{
Credentials: testCredentials,
Endpoint: String("MergeTestEndpoint"),
Region: String("MERGE_TEST_AWS_REGION"),
DisableSSL: Bool(true),
HTTPClient: http.DefaultClient,
LogLevel: LogLevel(LogDebug),
Logger: NewDefaultLogger(),
MaxRetries: Int(10),
DisableParamValidation: Bool(true),
DisableComputeChecksums: Bool(true),
S3ForcePathStyle: Bool(true),
}
var mergeTests = []struct {
cfg *Config
in *Config
want *Config
}{
{&Config{}, nil, &Config{}},
{&Config{}, &mergeTestZeroValueConfig, &Config{}},
{&Config{}, &mergeTestConfig, &mergeTestConfig},
}
func TestMerge(t *testing.T) {
for i, tt := range mergeTests {
got := tt.cfg.Copy()
got.MergeIn(tt.in)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("Config %d %+v", i, tt.cfg)
t.Errorf(" Merge(%+v)", tt.in)
t.Errorf(" got %+v", got)
t.Errorf(" want %+v", tt.want)
}
}
}

View File

@@ -0,0 +1,357 @@
package aws
import "time"
// String returns a pointer to the string value passed in.
func String(v string) *string {
return &v
}
// StringValue returns the value of the string pointer passed in or
// "" if the pointer is nil.
func StringValue(v *string) string {
if v != nil {
return *v
}
return ""
}
// StringSlice converts a slice of string values into a slice of
// string pointers
func StringSlice(src []string) []*string {
dst := make([]*string, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// StringValueSlice converts a slice of string pointers into a slice of
// string values
func StringValueSlice(src []*string) []string {
dst := make([]string, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// StringMap converts a string map of string values into a string
// map of string pointers
func StringMap(src map[string]string) map[string]*string {
dst := make(map[string]*string)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// StringValueMap converts a string map of string pointers into a string
// map of string values
func StringValueMap(src map[string]*string) map[string]string {
dst := make(map[string]string)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Bool returns a pointer to the bool value passed in.
func Bool(v bool) *bool {
return &v
}
// BoolValue returns the value of the bool pointer passed in or
// false if the pointer is nil.
func BoolValue(v *bool) bool {
if v != nil {
return *v
}
return false
}
// BoolSlice converts a slice of bool values into a slice of
// bool pointers
func BoolSlice(src []bool) []*bool {
dst := make([]*bool, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// BoolValueSlice converts a slice of bool pointers into a slice of
// bool values
func BoolValueSlice(src []*bool) []bool {
dst := make([]bool, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// BoolMap converts a string map of bool values into a string
// map of bool pointers
func BoolMap(src map[string]bool) map[string]*bool {
dst := make(map[string]*bool)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// BoolValueMap converts a string map of bool pointers into a string
// map of bool values
func BoolValueMap(src map[string]*bool) map[string]bool {
dst := make(map[string]bool)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int returns a pointer to the int value passed in.
func Int(v int) *int {
return &v
}
// IntValue returns the value of the int pointer passed in or
// 0 if the pointer is nil.
func IntValue(v *int) int {
if v != nil {
return *v
}
return 0
}
// IntSlice converts a slice of int values into a slice of
// int pointers
func IntSlice(src []int) []*int {
dst := make([]*int, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// IntValueSlice converts a slice of int pointers into a slice of
// int values
func IntValueSlice(src []*int) []int {
dst := make([]int, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// IntMap converts a string map of int values into a string
// map of int pointers
func IntMap(src map[string]int) map[string]*int {
dst := make(map[string]*int)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// IntValueMap converts a string map of int pointers into a string
// map of int values
func IntValueMap(src map[string]*int) map[string]int {
dst := make(map[string]int)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
return &v
}
// Int64Value returns the value of the int64 pointer passed in or
// 0 if the pointer is nil.
func Int64Value(v *int64) int64 {
if v != nil {
return *v
}
return 0
}
// Int64Slice converts a slice of int64 values into a slice of
// int64 pointers
func Int64Slice(src []int64) []*int64 {
dst := make([]*int64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int64ValueSlice converts a slice of int64 pointers into a slice of
// int64 values
func Int64ValueSlice(src []*int64) []int64 {
dst := make([]int64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int64Map converts a string map of int64 values into a string
// map of int64 pointers
func Int64Map(src map[string]int64) map[string]*int64 {
dst := make(map[string]*int64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int64ValueMap converts a string map of int64 pointers into a string
// map of int64 values
func Int64ValueMap(src map[string]*int64) map[string]int64 {
dst := make(map[string]int64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
return &v
}
// Float64Value returns the value of the float64 pointer passed in or
// 0 if the pointer is nil.
func Float64Value(v *float64) float64 {
if v != nil {
return *v
}
return 0
}
// Float64Slice converts a slice of float64 values into a slice of
// float64 pointers
func Float64Slice(src []float64) []*float64 {
dst := make([]*float64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values
func Float64ValueSlice(src []*float64) []float64 {
dst := make([]float64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Float64Map converts a string map of float64 values into a string
// map of float64 pointers
func Float64Map(src map[string]float64) map[string]*float64 {
dst := make(map[string]*float64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values
func Float64ValueMap(src map[string]*float64) map[string]float64 {
dst := make(map[string]float64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Time returns a pointer to the time.Time value passed in.
func Time(v time.Time) *time.Time {
return &v
}
// TimeValue returns the value of the time.Time pointer passed in or
// time.Time{} if the pointer is nil.
func TimeValue(v *time.Time) time.Time {
if v != nil {
return *v
}
return time.Time{}
}
// TimeSlice converts a slice of time.Time values into a slice of
// time.Time pointers
func TimeSlice(src []time.Time) []*time.Time {
dst := make([]*time.Time, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// TimeValueSlice converts a slice of time.Time pointers into a slice of
// time.Time values
func TimeValueSlice(src []*time.Time) []time.Time {
dst := make([]time.Time, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// TimeMap converts a string map of time.Time values into a string
// map of time.Time pointers
func TimeMap(src map[string]time.Time) map[string]*time.Time {
dst := make(map[string]*time.Time)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// TimeValueMap converts a string map of time.Time pointers into a string
// map of time.Time values
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
dst := make(map[string]time.Time)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
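
A quick usage sketch (not part of the commit) of the pointer helpers above; values are round-tripped through pointers, and nil pointers collapse back to their zero values:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    tags := aws.StringMap(map[string]string{"env": "prod"}) // map[string]*string
    enabled := aws.Bool(true)                               // *bool

    fmt.Println(aws.StringValueMap(tags)["env"]) // "prod"
    fmt.Println(aws.BoolValue(enabled))          // true
    fmt.Println(aws.BoolValue(nil))              // false, nil collapses to zero value
    fmt.Println(aws.IntValue(nil))               // 0
}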

View File

@@ -0,0 +1,437 @@
package aws
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
var testCasesStringSlice = [][]string{
{"a", "b", "c", "d", "e"},
{"a", "b", "", "", "e"},
}
func TestStringSlice(t *testing.T) {
for idx, in := range testCasesStringSlice {
if in == nil {
continue
}
out := StringSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := StringValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesStringValueSlice = [][]*string{
{String("a"), String("b"), nil, String("c")},
}
func TestStringValueSlice(t *testing.T) {
for idx, in := range testCasesStringValueSlice {
if in == nil {
continue
}
out := StringValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := StringSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesStringMap = []map[string]string{
{"a": "1", "b": "2", "c": "3"},
}
func TestStringMap(t *testing.T) {
for idx, in := range testCasesStringMap {
if in == nil {
continue
}
out := StringMap(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := StringValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesBoolSlice = [][]bool{
{true, true, false, false},
}
func TestBoolSlice(t *testing.T) {
for idx, in := range testCasesBoolSlice {
if in == nil {
continue
}
out := BoolSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := BoolValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesBoolValueSlice = [][]*bool{}
func TestBoolValueSlice(t *testing.T) {
for idx, in := range testCasesBoolValueSlice {
if in == nil {
continue
}
out := BoolValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := BoolSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesBoolMap = []map[string]bool{
{"a": true, "b": false, "c": true},
}
func TestBoolMap(t *testing.T) {
for idx, in := range testCasesBoolMap {
if in == nil {
continue
}
out := BoolMap(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := BoolValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesIntSlice = [][]int{
{1, 2, 3, 4},
}
func TestIntSlice(t *testing.T) {
for idx, in := range testCasesIntSlice {
if in == nil {
continue
}
out := IntSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := IntValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesIntValueSlice = [][]*int{}
func TestIntValueSlice(t *testing.T) {
for idx, in := range testCasesIntValueSlice {
if in == nil {
continue
}
out := IntValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := IntSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesIntMap = []map[string]int{
{"a": 3, "b": 2, "c": 1},
}
func TestIntMap(t *testing.T) {
for idx, in := range testCasesIntMap {
if in == nil {
continue
}
out := IntMap(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := IntValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesInt64Slice = [][]int64{
{1, 2, 3, 4},
}
func TestInt64Slice(t *testing.T) {
for idx, in := range testCasesInt64Slice {
if in == nil {
continue
}
out := Int64Slice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Int64ValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesInt64ValueSlice = [][]*int64{}
func TestInt64ValueSlice(t *testing.T) {
for idx, in := range testCasesInt64ValueSlice {
if in == nil {
continue
}
out := Int64ValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := Int64Slice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesInt64Map = []map[string]int64{
{"a": 3, "b": 2, "c": 1},
}
func TestInt64Map(t *testing.T) {
for idx, in := range testCasesInt64Map {
if in == nil {
continue
}
out := Int64Map(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Int64ValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesFloat64Slice = [][]float64{
{1, 2, 3, 4},
}
func TestFloat64Slice(t *testing.T) {
for idx, in := range testCasesFloat64Slice {
if in == nil {
continue
}
out := Float64Slice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Float64ValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesFloat64ValueSlice = [][]*float64{}
func TestFloat64ValueSlice(t *testing.T) {
for idx, in := range testCasesFloat64ValueSlice {
if in == nil {
continue
}
out := Float64ValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := Float64Slice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesFloat64Map = []map[string]float64{
{"a": 3, "b": 2, "c": 1},
}
func TestFloat64Map(t *testing.T) {
for idx, in := range testCasesFloat64Map {
if in == nil {
continue
}
out := Float64Map(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Float64ValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesTimeSlice = [][]time.Time{
{time.Now(), time.Now().AddDate(100, 0, 0)},
}
func TestTimeSlice(t *testing.T) {
for idx, in := range testCasesTimeSlice {
if in == nil {
continue
}
out := TimeSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := TimeValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesTimeValueSlice = [][]*time.Time{}
func TestTimeValueSlice(t *testing.T) {
for idx, in := range testCasesTimeValueSlice {
if in == nil {
continue
}
out := TimeValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := TimeSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesTimeMap = []map[string]time.Time{
{"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()},
}
func TestTimeMap(t *testing.T) {
for idx, in := range testCasesTimeMap {
if in == nil {
continue
}
out := TimeMap(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := TimeValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}

View File

@@ -0,0 +1,139 @@
package corehandlers
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"runtime"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// Interface for matching types which also have a Len method.
type lener interface {
Len() int
}
// BuildContentLengthHandler builds the content length of a request based on the body,
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
// to determine request body length and no "Content-Length" was specified it will panic.
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
length, _ := strconv.ParseInt(slength, 10, 64)
r.HTTPRequest.ContentLength = length
return
}
var length int64
switch body := r.Body.(type) {
case nil:
length = 0
case lener:
length = int64(body.Len())
case io.Seeker:
r.BodyStart, _ = body.Seek(0, 1)
end, _ := body.Seek(0, 2)
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
length = end - r.BodyStart
default:
panic("Cannot get length of body, must provide `ContentLength`")
}
r.HTTPRequest.ContentLength = length
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
}}
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
var SDKVersionUserAgentHandler = request.NamedHandler{
Name: "core.SDKVersionUserAgentHandler",
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
runtime.Version(), runtime.GOOS, runtime.GOARCH),
}
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
// SendHandler is a request handler to send service request using HTTP client.
var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
var err error
r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
if err != nil {
// Capture the case where url.Error is returned for error processing
// response. e.g. 301 without location header comes back as string
// error and r.HTTPResponse is nil. Other url redirect errors will
// come back in a similar way.
if e, ok := err.(*url.Error); ok && e.Err != nil {
if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
code, _ := strconv.ParseInt(s[1], 10, 64)
r.HTTPResponse = &http.Response{
StatusCode: int(code),
Status: http.StatusText(int(code)),
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
return
}
}
if r.HTTPResponse == nil {
// Add a dummy request response object to ensure the HTTPResponse
// value is consistent.
r.HTTPResponse = &http.Response{
StatusCode: int(0),
Status: http.StatusText(int(0)),
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
}
// Catch all other request errors.
r.Error = awserr.New("RequestError", "send request failed", err)
r.Retryable = aws.Bool(true) // network errors are retryable
}
}}
// ValidateResponseHandler is a request handler to validate service response.
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
// this may be replaced by an UnmarshalError handler
r.Error = awserr.New("UnknownError", "unknown error", nil)
}
}}
// AfterRetryHandler performs final checks to determine if the request should
// be retried and how long to delay.
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable == nil {
r.Retryable = aws.Bool(r.ShouldRetry(r))
}
if r.WillRetry() {
r.RetryDelay = r.RetryRules(r)
r.Config.SleepDelay(r.RetryDelay)
// when the expired token exception occurs the credentials
// need to be expired locally so that the next request to
// get credentials will trigger a credentials refresh.
if r.IsErrorExpired() {
r.Config.Credentials.Expire()
}
r.RetryCount++
r.Error = nil
}
}}
// ValidateEndpointHandler is a request handler to validate a request had the
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
// region is not valid.
var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
r.Error = aws.ErrMissingRegion
} else if r.ClientInfo.Endpoint == "" {
r.Error = aws.ErrMissingEndpoint
}
}}
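
For orientation, a minimal sketch (not part of the commit) of how these named handlers end up on a client's handler list. awstesting.NewClient and most handler-list phases are borrowed from the test files that follow; the Build phase and the exact ordering are assumptions for illustration only.

package corehandlers_test

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/corehandlers"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/awstesting"
)

// wireCoreHandlers builds a request whose lifecycle runs the core handlers above.
func wireCoreHandlers() *request.Request {
    svc := awstesting.NewClient(aws.NewConfig().WithRegion("us-west-2"))
    svc.Handlers.Clear()
    svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
    svc.Handlers.Build.PushBackNamed(corehandlers.BuildContentLengthHandler)
    svc.Handlers.Send.PushBackNamed(corehandlers.SendHandler)
    svc.Handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
    svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
    return svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
}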

View File

@@ -0,0 +1,113 @@
package corehandlers_test
import (
"fmt"
"net/http"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting"
)
func TestValidateEndpointHandler(t *testing.T) {
os.Clearenv()
svc := awstesting.NewClient(aws.NewConfig().WithRegion("us-west-2"))
svc.Handlers.Clear()
svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
err := req.Build()
assert.NoError(t, err)
}
func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
os.Clearenv()
svc := awstesting.NewClient()
svc.Handlers.Clear()
svc.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
err := req.Build()
assert.Error(t, err)
assert.Equal(t, aws.ErrMissingRegion, err)
}
type mockCredsProvider struct {
expired bool
retrieveCalled bool
}
func (m *mockCredsProvider) Retrieve() (credentials.Value, error) {
m.retrieveCalled = true
return credentials.Value{}, nil
}
func (m *mockCredsProvider) IsExpired() bool {
return m.expired
}
func TestAfterRetryRefreshCreds(t *testing.T) {
os.Clearenv()
credProvider := &mockCredsProvider{}
svc := awstesting.NewClient(&aws.Config{
Credentials: credentials.NewCredentials(credProvider),
MaxRetries: aws.Int(1),
})
svc.Handlers.Clear()
svc.Handlers.ValidateResponse.PushBack(func(r *request.Request) {
r.Error = awserr.New("UnknownError", "", nil)
r.HTTPResponse = &http.Response{StatusCode: 400}
})
svc.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
r.Error = awserr.New("ExpiredTokenException", "", nil)
})
svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired")
assert.False(t, credProvider.retrieveCalled)
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
req.Send()
assert.True(t, svc.Config.Credentials.IsExpired())
assert.False(t, credProvider.retrieveCalled)
_, err := svc.Config.Credentials.Get()
assert.NoError(t, err)
assert.True(t, credProvider.retrieveCalled)
}
type testSendHandlerTransport struct{}
func (t *testSendHandlerTransport) RoundTrip(r *http.Request) (*http.Response, error) {
return nil, fmt.Errorf("mock error")
}
func TestSendHandlerError(t *testing.T) {
svc := awstesting.NewClient(&aws.Config{
HTTPClient: &http.Client{
Transport: &testSendHandlerTransport{},
},
})
svc.Handlers.Clear()
svc.Handlers.Send.PushBackNamed(corehandlers.SendHandler)
r := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
r.Send()
assert.Error(t, r.Error)
assert.NotNil(t, r.HTTPResponse)
}

View File

@@ -0,0 +1,144 @@
package corehandlers
import (
"fmt"
"reflect"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// ValidateParametersHandler is a request handler to validate the input parameters.
// Validating parameters only has meaning if done prior to the request being sent.
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
if r.ParamsFilled() {
v := validator{errors: []string{}}
v.validateAny(reflect.ValueOf(r.Params), "")
if count := len(v.errors); count > 0 {
format := "%d validation errors:\n- %s"
msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- "))
r.Error = awserr.New("InvalidParameter", msg, nil)
}
}
}}
// A validator validates values. It collects the validation errors which occur.
type validator struct {
errors []string
}
// validateAny will validate any struct, slice or map type. All validations
// are also performed recursively for nested types.
func (v *validator) validateAny(value reflect.Value, path string) {
value = reflect.Indirect(value)
if !value.IsValid() {
return
}
switch value.Kind() {
case reflect.Struct:
v.validateStruct(value, path)
case reflect.Slice:
for i := 0; i < value.Len(); i++ {
v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
}
case reflect.Map:
for _, n := range value.MapKeys() {
v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
}
}
}
// validateStruct will validate the struct value's fields. If the structure has
// nested types those types will be validated also.
func (v *validator) validateStruct(value reflect.Value, path string) {
prefix := "."
if path == "" {
prefix = ""
}
for i := 0; i < value.Type().NumField(); i++ {
f := value.Type().Field(i)
if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
continue
}
fvalue := value.FieldByName(f.Name)
err := validateField(f, fvalue, validateFieldRequired, validateFieldMin)
if err != nil {
v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name))
continue
}
v.validateAny(fvalue, path+prefix+f.Name)
}
}
type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error
func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error {
for _, fn := range funcs {
if err := fn(f, fvalue); err != nil {
return err
}
}
return nil
}
// Validates that a field has a valid value provided for required fields.
func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error {
if f.Tag.Get("required") == "" {
return nil
}
switch fvalue.Kind() {
case reflect.Ptr, reflect.Slice, reflect.Map:
if fvalue.IsNil() {
return fmt.Errorf("missing required parameter")
}
default:
if !fvalue.IsValid() {
return fmt.Errorf("missing required parameter")
}
}
return nil
}
// Validates that if a value is provided for a field, that value must be at
// least a minimum length.
func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error {
minStr := f.Tag.Get("min")
if minStr == "" {
return nil
}
min, _ := strconv.ParseInt(minStr, 10, 64)
kind := fvalue.Kind()
if kind == reflect.Ptr {
if fvalue.IsNil() {
return nil
}
fvalue = fvalue.Elem()
}
switch fvalue.Kind() {
case reflect.String:
if int64(fvalue.Len()) < min {
return fmt.Errorf("field too short, minimum length %d", min)
}
case reflect.Slice, reflect.Map:
if fvalue.IsNil() {
return nil
}
if int64(fvalue.Len()) < min {
return fmt.Errorf("field too short, minimum length %d", min)
}
// TODO min can also apply to number minimum value.
}
return nil
}
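
A brief sketch (hypothetical field names, not part of the commit) of how an input struct opts into this handler through the required and min struct tags; the test file that follows exercises the same mechanism end to end.

package example

// CreateWidgetInput is a hypothetical API input shape; ValidateParametersHandler
// walks its fields via reflection and reports violations of the tags below.
type CreateWidgetInput struct {
    // Nil fails with "missing required parameter: Name".
    Name *string `required:"true"`
    // If set, must contain at least two elements.
    Tags []string `min:"2"`
}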

View File

@@ -0,0 +1,134 @@
package corehandlers_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/stretchr/testify/require"
)
var testSvc = func() *client.Client {
s := &client.Client{
Config: aws.Config{},
ClientInfo: metadata.ClientInfo{
ServiceName: "mock-service",
APIVersion: "2015-01-01",
},
}
return s
}()
type StructShape struct {
RequiredList []*ConditionalStructShape `required:"true"`
RequiredMap map[string]*ConditionalStructShape `required:"true"`
RequiredBool *bool `required:"true"`
OptionalStruct *ConditionalStructShape
hiddenParameter *string
metadataStructureShape
}
type metadataStructureShape struct {
SDKShapeTraits bool
}
type ConditionalStructShape struct {
Name *string `required:"true"`
SDKShapeTraits bool
}
func TestNoErrors(t *testing.T) {
input := &StructShape{
RequiredList: []*ConditionalStructShape{},
RequiredMap: map[string]*ConditionalStructShape{
"key1": {Name: aws.String("Name")},
"key2": {Name: aws.String("Name")},
},
RequiredBool: aws.Bool(true),
OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")},
}
req := testSvc.NewRequest(&request.Operation{}, input, nil)
corehandlers.ValidateParametersHandler.Fn(req)
require.NoError(t, req.Error)
}
func TestMissingRequiredParameters(t *testing.T) {
input := &StructShape{}
req := testSvc.NewRequest(&request.Operation{}, input, nil)
corehandlers.ValidateParametersHandler.Fn(req)
require.Error(t, req.Error)
assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList\n- missing required parameter: RequiredMap\n- missing required parameter: RequiredBool", req.Error.(awserr.Error).Message())
}
func TestNestedMissingRequiredParameters(t *testing.T) {
input := &StructShape{
RequiredList: []*ConditionalStructShape{{}},
RequiredMap: map[string]*ConditionalStructShape{
"key1": {Name: aws.String("Name")},
"key2": {},
},
RequiredBool: aws.Bool(true),
OptionalStruct: &ConditionalStructShape{},
}
req := testSvc.NewRequest(&request.Operation{}, input, nil)
corehandlers.ValidateParametersHandler.Fn(req)
require.Error(t, req.Error)
assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList[0].Name\n- missing required parameter: RequiredMap[\"key2\"].Name\n- missing required parameter: OptionalStruct.Name", req.Error.(awserr.Error).Message())
}
type testInput struct {
StringField string `min:"5"`
PtrStrField *string `min:"2"`
ListField []string `min:"3"`
MapField map[string]string `min:"4"`
}
var testsFieldMin = []struct {
err awserr.Error
in testInput
}{
{
err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 5: StringField", nil),
in: testInput{StringField: "abcd"},
},
{
err: awserr.New("InvalidParameter", "2 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField", nil),
in: testInput{StringField: "abcd", ListField: []string{"a", "b"}},
},
{
err: awserr.New("InvalidParameter", "3 validation errors:\n- field too short, minimum length 5: StringField\n- field too short, minimum length 3: ListField\n- field too short, minimum length 4: MapField", nil),
in: testInput{StringField: "abcd", ListField: []string{"a", "b"}, MapField: map[string]string{"a": "a", "b": "b"}},
},
{
err: awserr.New("InvalidParameter", "1 validation errors:\n- field too short, minimum length 2: PtrStrField", nil),
in: testInput{StringField: "abcde", PtrStrField: aws.String("v")},
},
{
err: nil,
in: testInput{StringField: "abcde", PtrStrField: aws.String("value"),
ListField: []string{"a", "b", "c"}, MapField: map[string]string{"a": "a", "b": "b", "c": "c", "d": "d"}},
},
}
func TestValidateFieldMinParameter(t *testing.T) {
for i, c := range testsFieldMin {
req := testSvc.NewRequest(&request.Operation{}, &c.in, nil)
corehandlers.ValidateParametersHandler.Fn(req)
require.Equal(t, c.err, req.Error, "%d case failed", i)
}
}

View File

@@ -0,0 +1,85 @@
package credentials
import (
"github.com/aws/aws-sdk-go/aws/awserr"
)
var (
// ErrNoValidProvidersFoundInChain is returned when there are no valid
// providers in the ChainProvider.
//
// @readonly
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", "no valid providers in chain", nil)
)
// A ChainProvider will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
// The ChainProvider provides a way of chaining multiple providers together
// which will pick the first available using priority order of the Providers
// in the list.
//
// If none of the Providers retrieve valid credentials Value, ChainProvider's
// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
//
// If a Provider is found which returns valid credentials Value ChainProvider
// will cache that Provider for all calls to IsExpired(), until Retrieve is
// called again.
//
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
// In this example EnvProvider will first check if any credentials are available
// via the environment variables. If there are none ChainProvider will check
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
// does not return any credentials ChainProvider will return the error
// ErrNoValidProvidersFoundInChain
//
// creds := NewChainCredentials(
// []Provider{
// &EnvProvider{},
// &EC2RoleProvider{},
// })
//
// // Usage of ChainCredentials with aws.Config
// svc := ec2.New(&aws.Config{Credentials: creds})
//
type ChainProvider struct {
Providers []Provider
curr Provider
}
// NewChainCredentials returns a pointer to a new Credentials object
// wrapping a chain of providers.
func NewChainCredentials(providers []Provider) *Credentials {
return NewCredentials(&ChainProvider{
Providers: append([]Provider{}, providers...),
})
}
// Retrieve returns the credentials value, or an error if no provider returned
// without error.
//
// If a provider is found it will be cached and any calls to IsExpired()
// will return the expired state of the cached provider.
func (c *ChainProvider) Retrieve() (Value, error) {
for _, p := range c.Providers {
if creds, err := p.Retrieve(); err == nil {
c.curr = p
return creds, nil
}
}
c.curr = nil
// TODO better error reporting. maybe report error for each failed retrieve?
return Value{}, ErrNoValidProvidersFoundInChain
}
// IsExpired will return the expired state of the currently cached provider
// if there is one. If there is no current provider, true will be returned.
func (c *ChainProvider) IsExpired() bool {
if c.curr != nil {
return c.curr.IsExpired()
}
return true
}
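
As a concrete variation on the example in the doc comment above (illustrative only), a chain that prefers environment variables and falls back to the shared credentials file defined later in this commit could look like:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
    creds := credentials.NewChainCredentials(
        []credentials.Provider{
            &credentials.EnvProvider{},               // environment variables first
            &credentials.SharedCredentialsProvider{}, // then the shared credentials file, "default" profile
        })
    if _, err := creds.Get(); err != nil {
        // Neither provider yielded credentials; err is ErrNoValidProvidersFoundInChain.
        fmt.Println(err)
    }
}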

View File

@@ -0,0 +1,73 @@
package credentials
import (
"testing"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/stretchr/testify/assert"
)
func TestChainProviderGet(t *testing.T) {
p := &ChainProvider{
Providers: []Provider{
&stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
&stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
&stubProvider{
creds: Value{
AccessKeyID: "AKID",
SecretAccessKey: "SECRET",
SessionToken: "",
},
},
},
}
creds, err := p.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
}
func TestChainProviderIsExpired(t *testing.T) {
stubProvider := &stubProvider{expired: true}
p := &ChainProvider{
Providers: []Provider{
stubProvider,
},
}
assert.True(t, p.IsExpired(), "Expect expired to be true before any Retrieve")
_, err := p.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
stubProvider.expired = true
assert.True(t, p.IsExpired(), "Expect return of expired provider")
_, err = p.Retrieve()
assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
}
func TestChainProviderWithNoProvider(t *testing.T) {
p := &ChainProvider{
Providers: []Provider{},
}
assert.True(t, p.IsExpired(), "Expect expired with no providers")
_, err := p.Retrieve()
assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned")
}
func TestChainProviderWithNoValidProvider(t *testing.T) {
p := &ChainProvider{
Providers: []Provider{
&stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
&stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
},
}
assert.True(t, p.IsExpired(), "Expect expired with no providers")
_, err := p.Retrieve()
assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned")
}

View File

@@ -0,0 +1,220 @@
// Package credentials provides credential retrieval and management
//
// The Credentials is the primary method of getting access to and managing
// credentials Values. Using dependency injection retrieval of the credential
// values is handled by an object which satisfies the Provider interface.
//
// By default the Credentials.Get() will cache the successful result of a
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
// point Credentials will call Provider's Retrieve() to get new credential Value.
//
// The Provider is responsible for determining when credentials Value have expired.
// It is also important to note that Credentials will always call Retrieve the
// first time Credentials.Get() is called.
//
// Example of using the environment variable credentials.
//
// creds := NewEnvCredentials()
//
// // Retrieve the credentials value
// credValue, err := creds.Get()
// if err != nil {
// // handle error
// }
//
// Example of forcing credentials to expire and be refreshed on the next Get().
// This may be helpful to proactively expire credentials and refresh them sooner
// than they would naturally expire on their own.
//
// creds := NewCredentials(&EC2RoleProvider{})
// creds.Expire()
// credsValue, err := creds.Get()
// // New credentials will be retrieved instead of from cache.
//
//
// Custom Provider
//
// Each Provider built into this package also provides a helper method to generate
// a Credentials pointer setup with the provider. To use a custom Provider just
// create a type which satisfies the Provider interface and pass it to the
// NewCredentials method.
//
// type MyProvider struct{}
// func (m *MyProvider) Retrieve() (Value, error) {...}
// func (m *MyProvider) IsExpired() bool {...}
//
// creds := NewCredentials(&MyProvider{})
// credValue, err := creds.Get()
//
package credentials
import (
"sync"
"time"
)
// AnonymousCredentials is an empty Credential object that can be used as
// dummy placeholder credentials for requests that do not need to be signed.
//
// This Credentials can be used to configure a service to not sign requests
// when making service API calls. For example, when accessing public
// s3 buckets.
//
// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
// // Access public S3 buckets.
//
// @readonly
var AnonymousCredentials = NewStaticCredentials("", "", "")
// A Value is the AWS credentials value for individual credential fields.
type Value struct {
// AWS Access key ID
AccessKeyID string
// AWS Secret Access Key
SecretAccessKey string
// AWS Session Token
SessionToken string
}
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what
// being expired means.
//
// The Provider should not need to implement its own mutexes, because
// that will be managed by Credentials.
type Provider interface {
// Retrieve returns the credentials Value if it was successfully retrieved.
// An error is returned if the value was not obtainable, or was empty.
Retrieve() (Value, error)
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
IsExpired() bool
}
// An Expiry provides shared expiration logic to be used by credentials
// providers to implement expiry functionality.
//
// The best method to use this struct is as an anonymous field within the
// provider's struct.
//
// Example:
// type EC2RoleProvider struct {
// Expiry
// ...
// }
type Expiry struct {
// The date/time when to expire on
expiration time.Time
// If set will be used by IsExpired to determine the current time.
// Defaults to time.Now if CurrentTime is not set. Available for testing
// to be able to mock out the current time.
CurrentTime func() time.Time
}
// SetExpiration sets the expiration IsExpired will check when called.
//
// If window is greater than 0 the expiration time will be reduced by the
// window value.
//
// Using a window is helpful to trigger credentials to expire sooner than
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
e.expiration = expiration
if window > 0 {
e.expiration = e.expiration.Add(-window)
}
}
// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
if e.CurrentTime == nil {
e.CurrentTime = time.Now
}
return e.expiration.Before(e.CurrentTime())
}
// A Credentials provides synchronous safe retrieval of AWS credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
// Credentials is safe to use across multiple goroutines and will manage the
// synchronous state so the Providers do not need to implement their own
// synchronization.
//
// The first Credentials.Get() will always call Provider.Retrieve() to get the
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
creds Value
forceRefresh bool
m sync.Mutex
provider Provider
}
// NewCredentials returns a pointer to a new Credentials with the provider set.
func NewCredentials(provider Provider) *Credentials {
return &Credentials{
provider: provider,
forceRefresh: true,
}
}
// Get returns the credentials value, or error if the credentials Value failed
// to be retrieved.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
// to refresh the credentials.
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
func (c *Credentials) Get() (Value, error) {
c.m.Lock()
defer c.m.Unlock()
if c.isExpired() {
creds, err := c.provider.Retrieve()
if err != nil {
return Value{}, err
}
c.creds = creds
c.forceRefresh = false
}
return c.creds, nil
}
// Expire expires the credentials and forces them to be retrieved on the
// next call to Get().
//
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
c.m.Lock()
defer c.m.Unlock()
c.forceRefresh = true
}
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
//
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
c.m.Lock()
defer c.m.Unlock()
return c.isExpired()
}
// isExpired helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpired() bool {
return c.forceRefresh || c.provider.IsExpired()
}
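
To make the Provider contract and the Expiry helper above concrete, here is a minimal sketch of a custom provider; fetchToken is a hypothetical stand-in for whatever backend issues short-lived credentials.

package credentials

import "time"

// tokenProvider is an illustrative Provider that embeds Expiry so the
// Credentials wrapper knows when to call Retrieve again.
type tokenProvider struct {
    Expiry
    fetchToken func() (Value, time.Time, error) // hypothetical credential source
}

func (p *tokenProvider) Retrieve() (Value, error) {
    creds, expiresAt, err := p.fetchToken()
    if err != nil {
        return Value{}, err
    }
    // Expire one minute early so in-flight requests never race a stale token.
    p.SetExpiration(expiresAt, time.Minute)
    return creds, nil
}

Wrapping it with NewCredentials(&tokenProvider{...}) then gives the same cached, expiry-aware behaviour as the built-in providers.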

View File

@@ -0,0 +1,62 @@
package credentials
import (
"testing"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/stretchr/testify/assert"
)
type stubProvider struct {
creds Value
expired bool
err error
}
func (s *stubProvider) Retrieve() (Value, error) {
s.expired = false
return s.creds, s.err
}
func (s *stubProvider) IsExpired() bool {
return s.expired
}
func TestCredentialsGet(t *testing.T) {
c := NewCredentials(&stubProvider{
creds: Value{
AccessKeyID: "AKID",
SecretAccessKey: "SECRET",
SessionToken: "",
},
expired: true,
})
creds, err := c.Get()
assert.Nil(t, err, "Expected no error")
assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
}
func TestCredentialsGetWithError(t *testing.T) {
c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true})
_, err := c.Get()
assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error")
}
func TestCredentialsExpire(t *testing.T) {
stub := &stubProvider{}
c := NewCredentials(stub)
stub.expired = false
assert.True(t, c.IsExpired(), "Expected to start out expired")
c.Expire()
assert.True(t, c.IsExpired(), "Expected to be expired")
c.forceRefresh = false
assert.False(t, c.IsExpired(), "Expected not to be expired")
stub.expired = true
assert.True(t, c.IsExpired(), "Expected to be expired")
}

View File

@@ -0,0 +1,173 @@
package ec2rolecreds
import (
"bufio"
"encoding/json"
"fmt"
"path"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
)
// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
//
// Example of how to configure the EC2RoleProvider with a custom http Client, Endpoint
// or ExpiryWindow
//
// p := &ec2rolecreds.EC2RoleProvider{
// // Pass in a custom timeout to be used when requesting
// // IAM EC2 Role credentials.
// Client: &http.Client{
// Timeout: 10 * time.Second,
// },
// // Do not use early expiry of credentials. If a non zero value is
// // specified the credentials will be expired early
// ExpiryWindow: 0,
// }
type EC2RoleProvider struct {
credentials.Expiry
// Required EC2Metadata client to use when connecting to EC2 metadata service.
Client *ec2metadata.EC2Metadata
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
// The ConfigProvider is satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
p := &EC2RoleProvider{
Client: ec2metadata.New(c),
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
// metadata service.
func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
p := &EC2RoleProvider{
Client: client,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// Retrieve retrieves credentials from the EC2 service.
// An error will be returned if the request fails, or if the desired
// credentials cannot be extracted.
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
credsList, err := requestCredList(m.Client)
if err != nil {
return credentials.Value{}, err
}
if len(credsList) == 0 {
return credentials.Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
}
credsName := credsList[0]
roleCreds, err := requestCred(m.Client, credsName)
if err != nil {
return credentials.Value{}, err
}
m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
return credentials.Value{
AccessKeyID: roleCreds.AccessKeyID,
SecretAccessKey: roleCreds.SecretAccessKey,
SessionToken: roleCreds.Token,
}, nil
}
// An ec2RoleCredRespBody provides the shape for unmarshalling credential
// request responses.
type ec2RoleCredRespBody struct {
// Success State
Expiration time.Time
AccessKeyID string
SecretAccessKey string
Token string
// Error state
Code string
Message string
}
const iamSecurityCredsPath = "/iam/security-credentials"
// requestCredList requests a list of credentials from the EC2 service. If the
// request fails or the response cannot be read, an error is returned.
func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
resp, err := client.GetMetadata(iamSecurityCredsPath)
if err != nil {
return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err)
}
credsList := []string{}
s := bufio.NewScanner(strings.NewReader(resp))
for s.Scan() {
credsList = append(credsList, s.Text())
}
if err := s.Err(); err != nil {
return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err)
}
return credsList, nil
}
// requestCred requests the credentials for a specific role from the EC2 service.
//
// If the credentials cannot be found, or there is an error reading the response,
// an error will be returned.
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
if err != nil {
return ec2RoleCredRespBody{},
awserr.New("EC2RoleRequestError",
fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
err)
}
respCreds := ec2RoleCredRespBody{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
return ec2RoleCredRespBody{},
awserr.New("SerializationError",
fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
err)
}
if respCreds.Code != "Success" {
// If an error code was returned something failed requesting the role.
return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
}
return respCreds, nil
}
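
For orientation, a minimal wiring sketch (not part of the commit); session.New is used the same way as in the tests below, and the ExpiryWindow value is arbitrary:

package main

import (
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // Start refreshing the role credentials five minutes before they expire.
    creds := ec2rolecreds.NewCredentials(session.New(), func(p *ec2rolecreds.EC2RoleProvider) {
        p.ExpiryWindow = 5 * time.Minute
    })
    cfg := &aws.Config{Credentials: creds}
    _ = cfg // hand cfg to a service client constructor
}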

View File

@@ -0,0 +1,159 @@
package ec2rolecreds_test
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
)
const credsRespTmpl = `{
"Code": "Success",
"Type": "AWS-HMAC",
"AccessKeyId" : "accessKey",
"SecretAccessKey" : "secret",
"Token" : "token",
"Expiration" : "%s",
"LastUpdated" : "2009-11-23T0:00:00Z"
}`
const credsFailRespTmpl = `{
"Code": "ErrorCode",
"Message": "ErrorMsg",
"LastUpdated": "2009-11-23T0:00:00Z"
}`
func initTestServer(expireOn string, failAssume bool) *httptest.Server {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/latest/meta-data/iam/security-credentials" {
fmt.Fprintln(w, "RoleName")
} else if r.URL.Path == "/latest/meta-data/iam/security-credentials/RoleName" {
if failAssume {
fmt.Fprintf(w, credsFailRespTmpl)
} else {
fmt.Fprintf(w, credsRespTmpl, expireOn)
}
} else {
http.Error(w, "bad request", http.StatusBadRequest)
}
}))
return server
}
func TestEC2RoleProvider(t *testing.T) {
server := initTestServer("2014-12-16T01:51:37Z", false)
defer server.Close()
p := &ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
}
creds, err := p.Retrieve()
assert.Nil(t, err, "Expect no error, %v", err)
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}
func TestEC2RoleProviderFailAssume(t *testing.T) {
server := initTestServer("2014-12-16T01:51:37Z", true)
defer server.Close()
p := &ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
}
creds, err := p.Retrieve()
assert.Error(t, err, "Expect error")
e := err.(awserr.Error)
assert.Equal(t, "ErrorCode", e.Code())
assert.Equal(t, "ErrorMsg", e.Message())
assert.Nil(t, e.OrigErr())
assert.Equal(t, "", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "", creds.SecretAccessKey, "Expect secret access key to match")
assert.Equal(t, "", creds.SessionToken, "Expect session token to match")
}
func TestEC2RoleProviderIsExpired(t *testing.T) {
server := initTestServer("2014-12-16T01:51:37Z", false)
defer server.Close()
p := &ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
}
p.CurrentTime = func() time.Time {
return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC)
}
assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")
_, err := p.Retrieve()
assert.Nil(t, err, "Expect no error, %v", err)
assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")
p.CurrentTime = func() time.Time {
return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC)
}
assert.True(t, p.IsExpired(), "Expect creds to be expired.")
}
func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) {
server := initTestServer("2014-12-16T01:51:37Z", false)
defer server.Close()
p := &ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
ExpiryWindow: time.Hour * 1,
}
p.CurrentTime = func() time.Time {
return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC)
}
assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")
_, err := p.Retrieve()
assert.Nil(t, err, "Expect no error, %v", err)
assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")
p.CurrentTime = func() time.Time {
return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC)
}
assert.True(t, p.IsExpired(), "Expect creds to be expired.")
}
func BenchmarkEC3RoleProvider(b *testing.B) {
server := initTestServer("2014-12-16T01:51:37Z", false)
defer server.Close()
p := &ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")}),
}
_, err := p.Retrieve()
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := p.Retrieve(); err != nil {
b.Fatal(err)
}
}
}

View File

@@ -0,0 +1,73 @@
package credentials
import (
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
)
var (
// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
// found in the process's environment.
//
// @readonly
ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
// can't be found in the process's environment.
//
// @readonly
ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)
// An EnvProvider retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
type EnvProvider struct {
retrieved bool
}
// NewEnvCredentials returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvCredentials() *Credentials {
return NewCredentials(&EnvProvider{})
}
// Retrieve retrieves the keys from the environment.
func (e *EnvProvider) Retrieve() (Value, error) {
e.retrieved = false
id := os.Getenv("AWS_ACCESS_KEY_ID")
if id == "" {
id = os.Getenv("AWS_ACCESS_KEY")
}
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
if secret == "" {
secret = os.Getenv("AWS_SECRET_KEY")
}
if id == "" {
return Value{}, ErrAccessKeyIDNotFound
}
if secret == "" {
return Value{}, ErrSecretAccessKeyNotFound
}
e.retrieved = true
return Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
}, nil
}
// IsExpired returns true if the credentials have not been retrieved.
func (e *EnvProvider) IsExpired() bool {
return !e.retrieved
}
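
A minimal usage sketch (illustrative only):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
    creds := credentials.NewEnvCredentials()
    v, err := creds.Get()
    if err != nil {
        // ErrAccessKeyIDNotFound or ErrSecretAccessKeyNotFound when the
        // corresponding environment variables are missing.
        fmt.Println(err)
        return
    }
    fmt.Println(v.AccessKeyID) // SessionToken is empty unless AWS_SESSION_TOKEN was set
}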

View File

@@ -0,0 +1,70 @@
package credentials
import (
"github.com/stretchr/testify/assert"
"os"
"testing"
)
func TestEnvProviderRetrieve(t *testing.T) {
os.Clearenv()
os.Setenv("AWS_ACCESS_KEY_ID", "access")
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_SESSION_TOKEN", "token")
e := EnvProvider{}
creds, err := e.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}
func TestEnvProviderIsExpired(t *testing.T) {
os.Clearenv()
os.Setenv("AWS_ACCESS_KEY_ID", "access")
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_SESSION_TOKEN", "token")
e := EnvProvider{}
assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.")
_, err := e.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.")
}
func TestEnvProviderNoAccessKeyID(t *testing.T) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
e := EnvProvider{}
creds, err := e.Retrieve()
assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", creds, err)
}
func TestEnvProviderNoSecretAccessKey(t *testing.T) {
os.Clearenv()
os.Setenv("AWS_ACCESS_KEY_ID", "access")
e := EnvProvider{}
creds, err := e.Retrieve()
assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err)
}
func TestEnvProviderAlternateNames(t *testing.T) {
os.Clearenv()
os.Setenv("AWS_ACCESS_KEY", "access")
os.Setenv("AWS_SECRET_KEY", "secret")
e := EnvProvider{}
creds, err := e.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID")
assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key")
assert.Empty(t, creds.SessionToken, "Expected no token")
}

View File

@@ -0,0 +1,12 @@
[default]
aws_access_key_id = accessKey
aws_secret_access_key = secret
aws_session_token = token
[no_token]
aws_access_key_id = accessKey
aws_secret_access_key = secret
[with_colon]
aws_access_key_id: accessKey
aws_secret_access_key: secret

View File

@@ -0,0 +1,147 @@
package credentials
import (
"fmt"
"os"
"path/filepath"
"github.com/go-ini/ini"
"github.com/aws/aws-sdk-go/aws/awserr"
)
var (
// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
//
// @readonly
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
)
// A SharedCredentialsProvider retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
// Path to the shared credentials file.
//
// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
// env value is empty will default to current user's home directory.
// Linux/OSX: "$HOME/.aws/credentials"
// Windows: "%USERPROFILE%\.aws\credentials"
Filename string
// AWS Profile to extract credentials from the shared credentials file. If empty
// will default to environment variable "AWS_PROFILE" or "default" if
// environment variable is also not set.
Profile string
// retrieved states if the credentials have been successfully retrieved.
retrieved bool
}
// NewSharedCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider.
func NewSharedCredentials(filename, profile string) *Credentials {
return NewCredentials(&SharedCredentialsProvider{
Filename: filename,
Profile: profile,
})
}
// Retrieve reads and extracts the shared credentials from the current
// user's home directory.
func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
p.retrieved = false
filename, err := p.filename()
if err != nil {
return Value{}, err
}
creds, err := loadProfile(filename, p.profile())
if err != nil {
return Value{}, err
}
p.retrieved = true
return creds, nil
}
// IsExpired returns if the shared credentials have expired.
func (p *SharedCredentialsProvider) IsExpired() bool {
return !p.retrieved
}
// loadProfile loads the named profile from the shared credentials file pointed
// to by filename. The credentials retrieved from the profile are returned, or an
// error if the file cannot be read or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
config, err := ini.Load(filename)
if err != nil {
return Value{}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
}
iniProfile, err := config.GetSection(profile)
if err != nil {
return Value{}, awserr.New("SharedCredsLoad", "failed to get profile", err)
}
id, err := iniProfile.GetKey("aws_access_key_id")
if err != nil {
return Value{}, awserr.New("SharedCredsAccessKey",
fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
err)
}
secret, err := iniProfile.GetKey("aws_secret_access_key")
if err != nil {
return Value{}, awserr.New("SharedCredsSecret",
fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
nil)
}
// Default to empty string if not found
token := iniProfile.Key("aws_session_token")
return Value{
AccessKeyID: id.String(),
SecretAccessKey: secret.String(),
SessionToken: token.String(),
}, nil
}
// filename returns the filename to use to read AWS shared credentials.
//
// Will return an error if the user's home directory path cannot be found.
func (p *SharedCredentialsProvider) filename() (string, error) {
if p.Filename == "" {
if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
return p.Filename, nil
}
homeDir := os.Getenv("HOME") // *nix
if homeDir == "" { // Windows
homeDir = os.Getenv("USERPROFILE")
}
if homeDir == "" {
return "", ErrSharedCredentialsHomeNotFound
}
p.Filename = filepath.Join(homeDir, ".aws", "credentials")
}
return p.Filename, nil
}
// profile returns the AWS shared credentials profile. If empty will read
// environment variable "AWS_PROFILE". If that is not set profile will
// return "default".
func (p *SharedCredentialsProvider) profile() string {
if p.Profile == "" {
p.Profile = os.Getenv("AWS_PROFILE")
}
if p.Profile == "" {
p.Profile = "default"
}
return p.Profile
}
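
A short, hypothetical sketch of the provider above, reading the no_token profile from a file laid out like the example.ini fixture shown earlier (profile name is illustrative):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
    // An empty filename falls back to AWS_SHARED_CREDENTIALS_FILE or the
    // user's home directory, as implemented in filename() above.
    creds := credentials.NewSharedCredentials("", "no_token")
    v, err := creds.Get()
    if err != nil {
        fmt.Println("could not load shared credentials:", err)
        return
    }
    fmt.Println("access key id:", v.AccessKeyID)
}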

View File

@@ -0,0 +1,100 @@
package credentials
import (
"github.com/stretchr/testify/assert"
"os"
"testing"
)
func TestSharedCredentialsProvider(t *testing.T) {
os.Clearenv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
creds, err := p.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}
func TestSharedCredentialsProviderIsExpired(t *testing.T) {
os.Clearenv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve")
_, err := p.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve")
}
func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILE(t *testing.T) {
os.Clearenv()
os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "example.ini")
p := SharedCredentialsProvider{}
creds, err := p.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
}
func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "no_token")
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
creds, err := p.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
assert.Empty(t, creds.SessionToken, "Expect no token")
}
func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) {
os.Clearenv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"}
creds, err := p.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
assert.Empty(t, creds.SessionToken, "Expect no token")
}
func TestSharedCredentialsProviderColonInCredFile(t *testing.T) {
os.Clearenv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: "with_colon"}
creds, err := p.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
assert.Empty(t, creds.SessionToken, "Expect no token")
}
func BenchmarkSharedCredentialsProvider(b *testing.B) {
os.Clearenv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
_, err := p.Retrieve()
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := p.Retrieve()
if err != nil {
b.Fatal(err)
}
}
}

View File

@@ -0,0 +1,44 @@
package credentials
import (
"github.com/aws/aws-sdk-go/aws/awserr"
)
var (
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
//
// @readonly
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
)
// A StaticProvider is a set of credentials which are set programmatically,
// and will never expire.
type StaticProvider struct {
Value
}
// NewStaticCredentials returns a pointer to a new Credentials object
// wrapping a static credentials value provider.
func NewStaticCredentials(id, secret, token string) *Credentials {
return NewCredentials(&StaticProvider{Value: Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: token,
}})
}
// Retrieve returns the credentials or error if the credentials are invalid.
func (s *StaticProvider) Retrieve() (Value, error) {
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
return Value{}, ErrStaticCredentialsEmpty
}
return s.Value, nil
}
// IsExpired returns if the credentials are expired.
//
// For StaticProvider, the credentials never expire.
func (s *StaticProvider) IsExpired() bool {
return false
}
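
A minimal, hypothetical sketch of the static provider above, e.g. for credentials supplied directly by configuration (the key values are placeholders):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
    // An empty session token is allowed; an empty key or secret yields
    // ErrStaticCredentialsEmpty from Retrieve().
    creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
    v, err := creds.Get()
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("using access key:", v.AccessKeyID)
}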

View File

@@ -0,0 +1,34 @@
package credentials
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestStaticProviderGet(t *testing.T) {
s := StaticProvider{
Value: Value{
AccessKeyID: "AKID",
SecretAccessKey: "SECRET",
SessionToken: "",
},
}
creds, err := s.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
assert.Empty(t, creds.SessionToken, "Expect no session token")
}
func TestStaticProviderIsExpired(t *testing.T) {
s := StaticProvider{
Value: Value{
AccessKeyID: "AKID",
SecretAccessKey: "SECRET",
SessionToken: "",
},
}
assert.False(t, s.IsExpired(), "Expect static credentials to never expire")
}

View File

@@ -0,0 +1,130 @@
// Package stscreds provides credential Providers for retrieving STS AWS credentials.
//
// STS provides multiple ways to retrieve credentials which can be used when making
// future AWS service API operation calls.
package stscreds
import (
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/sts"
)
// AssumeRoler represents the minimal subset of the STS client API used by this provider.
type AssumeRoler interface {
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}
// DefaultDuration is the default amount of time that the credentials will be
// valid for (15 minutes).
var DefaultDuration = time.Duration(15) * time.Minute
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
// keeps track of their expiration time. This provider must be used explicitly,
// as it is not included in the credentials chain.
type AssumeRoleProvider struct {
credentials.Expiry
// STS client to make assume role request with.
Client AssumeRoler
// Role to be assumed.
RoleARN string
// Session name, if you wish to reuse the credentials elsewhere.
RoleSessionName string
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
Duration time.Duration
// Optional ExternalID to pass along, defaults to nil if not set.
ExternalID *string
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: sts.New(c),
RoleARN: roleARN,
Duration: DefaultDuration,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes an AssumeRoler which can be satisfied by the STS client.
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: svc,
RoleARN: roleARN,
Duration: DefaultDuration,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
// Apply defaults where parameters are not set.
if p.RoleSessionName == "" {
// Try to work out a role name that will hopefully end up unique.
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
}
if p.Duration == 0 {
// Expire as often as AWS permits.
p.Duration = DefaultDuration
}
roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
RoleArn: aws.String(p.RoleARN),
RoleSessionName: aws.String(p.RoleSessionName),
ExternalId: p.ExternalID,
})
if err != nil {
return credentials.Value{}, err
}
// We will proactively generate new credentials before they expire.
p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
return credentials.Value{
AccessKeyID: *roleOutput.Credentials.AccessKeyId,
SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
SessionToken: *roleOutput.Credentials.SessionToken,
}, nil
}
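
A hypothetical sketch of assuming a role with the provider above and handing the resulting credentials to a service client (the role ARN and the CloudWatch client are illustrative assumptions, not part of this changeset):

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
    sess := session.New()
    // Credentials are valid for DefaultDuration (15 minutes) and refresh
    // automatically on the next Get() after they expire.
    creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/metrics-reader")
    svc := cloudwatch.New(sess, &aws.Config{Credentials: creds})
    _ = svc // subsequent API calls use the assumed-role credentials
}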

View File

@@ -0,0 +1,56 @@
package stscreds
import (
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/stretchr/testify/assert"
)
type stubSTS struct {
}
func (s *stubSTS) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
expiry := time.Now().Add(60 * time.Minute)
return &sts.AssumeRoleOutput{
Credentials: &sts.Credentials{
// Just reflect the role arn to the provider.
AccessKeyId: input.RoleArn,
SecretAccessKey: aws.String("assumedSecretAccessKey"),
SessionToken: aws.String("assumedSessionToken"),
Expiration: &expiry,
},
}, nil
}
func TestAssumeRoleProvider(t *testing.T) {
stub := &stubSTS{}
p := &AssumeRoleProvider{
Client: stub,
RoleARN: "roleARN",
}
creds, err := p.Retrieve()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, "roleARN", creds.AccessKeyID, "Expect access key ID to be reflected role ARN")
assert.Equal(t, "assumedSecretAccessKey", creds.SecretAccessKey, "Expect secret access key to match")
assert.Equal(t, "assumedSessionToken", creds.SessionToken, "Expect session token to match")
}
func BenchmarkAssumeRoleProvider(b *testing.B) {
stub := &stubSTS{}
p := &AssumeRoleProvider{
Client: stub,
RoleARN: "roleARN",
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := p.Retrieve(); err != nil {
b.Fatal(err)
}
}
}

View File

@@ -0,0 +1,76 @@
// Package defaults is a collection of helpers to retrieve the SDK's default
// configuration and handlers.
package defaults
import (
"net/http"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/endpoints"
)
// A Defaults provides a collection of default values for SDK clients.
type Defaults struct {
Config *aws.Config
Handlers request.Handlers
}
// Get returns the SDK's default values with Config and handlers pre-configured.
func Get() Defaults {
cfg := Config()
handlers := Handlers()
cfg.Credentials = CredChain(cfg, handlers)
return Defaults{
Config: cfg,
Handlers: handlers,
}
}
// Config returns the default configuration.
func Config() *aws.Config {
return aws.NewConfig().
WithCredentials(credentials.AnonymousCredentials).
WithRegion(os.Getenv("AWS_REGION")).
WithHTTPClient(http.DefaultClient).
WithMaxRetries(aws.UseServiceDefaultRetries).
WithLogger(aws.NewDefaultLogger()).
WithLogLevel(aws.LogOff).
WithSleepDelay(time.Sleep)
}
// Handlers returns the default request handlers.
func Handlers() request.Handlers {
var handlers request.Handlers
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
handlers.Send.PushBackNamed(corehandlers.SendHandler)
handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
return handlers
}
// CredChain returns the default credential chain.
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
return credentials.NewChainCredentials(
[]credentials.Provider{
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
&ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
ExpiryWindow: 5 * time.Minute,
},
})
}
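
A hypothetical sketch showing how the defaults above are consumed; the credential chain resolves environment variables first, then the shared credentials file, then the EC2 instance role:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
    d := defaults.Get()
    // Walks the chain built by CredChain above until a provider succeeds.
    v, err := d.Config.Credentials.Get()
    if err != nil {
        fmt.Println("no credentials resolved:", err)
        return
    }
    fmt.Println("resolved access key:", v.AccessKeyID)
}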

View File

@@ -0,0 +1,43 @@
package ec2metadata
import (
"path"
"github.com/aws/aws-sdk-go/aws/request"
)
// GetMetadata uses the path provided to request information from the EC2
// instance metadata service. The content is returned as a string, or an
// error if the request fails.
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
op := &request.Operation{
Name: "GetMetadata",
HTTPMethod: "GET",
HTTPPath: path.Join("/", "meta-data", p),
}
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
return output.Content, req.Send()
}
// Region returns the region the instance is running in.
func (c *EC2Metadata) Region() (string, error) {
resp, err := c.GetMetadata("placement/availability-zone")
if err != nil {
return "", err
}
// returns region without the suffix. Eg: us-west-2a becomes us-west-2
return resp[:len(resp)-1], nil
}
// Available returns if the application has access to the EC2 Metadata service.
// Can be used to determine if application is running within an EC2 Instance and
// the metadata service is available.
func (c *EC2Metadata) Available() bool {
if _, err := c.GetMetadata("instance-id"); err != nil {
return false
}
return true
}
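
A hypothetical sketch of the metadata client methods above, run from inside an EC2 instance:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/ec2metadata"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    c := ec2metadata.New(session.New())
    if !c.Available() {
        fmt.Println("metadata service unreachable (not running on EC2?)")
        return
    }
    // Region strips the trailing zone letter, e.g. us-west-2a -> us-west-2.
    region, err := c.Region()
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("region:", region)
}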

View File

@@ -0,0 +1,101 @@
package ec2metadata_test
import (
"bytes"
"io/ioutil"
"net/http"
"net/http/httptest"
"path"
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
)
func initTestServer(path string, resp string) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.RequestURI != path {
http.Error(w, "not found", http.StatusNotFound)
return
}
w.Write([]byte(resp))
}))
}
func TestEndpoint(t *testing.T) {
c := ec2metadata.New(session.New())
op := &request.Operation{
Name: "GetMetadata",
HTTPMethod: "GET",
HTTPPath: path.Join("/", "meta-data", "testpath"),
}
req := c.NewRequest(op, nil, nil)
assert.Equal(t, "http://169.254.169.254/latest", req.ClientInfo.Endpoint)
assert.Equal(t, "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String())
}
func TestGetMetadata(t *testing.T) {
server := initTestServer(
"/latest/meta-data/some/path",
"success", // real response includes suffix
)
defer server.Close()
c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
resp, err := c.GetMetadata("some/path")
assert.NoError(t, err)
assert.Equal(t, "success", resp)
}
func TestGetRegion(t *testing.T) {
server := initTestServer(
"/latest/meta-data/placement/availability-zone",
"us-west-2a", // real response includes suffix
)
defer server.Close()
c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
region, err := c.Region()
assert.NoError(t, err)
assert.Equal(t, "us-west-2", region)
}
func TestMetadataAvailable(t *testing.T) {
server := initTestServer(
"/latest/meta-data/instance-id",
"instance-id",
)
defer server.Close()
c := ec2metadata.New(session.New(), &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
available := c.Available()
assert.True(t, available)
}
func TestMetadataNotAvailable(t *testing.T) {
c := ec2metadata.New(session.New())
c.Handlers.Send.Clear()
c.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &http.Response{
StatusCode: int(0),
Status: http.StatusText(int(0)),
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
r.Error = awserr.New("RequestError", "send request failed", nil)
r.Retryable = aws.Bool(true) // network errors are retryable
})
available := c.Available()
assert.False(t, available)
}

View File

@@ -0,0 +1,116 @@
// Package ec2metadata provides the client for making API calls to the
// EC2 Metadata service.
package ec2metadata
import (
"io/ioutil"
"net"
"net/http"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
)
// ServiceName is the name of the service.
const ServiceName = "ec2metadata"
// An EC2Metadata is an EC2 Metadata service client.
type EC2Metadata struct {
*client.Client
}
// New creates a new instance of the EC2Metadata client with a session.
// This client is safe to use across multiple goroutines.
//
// Example:
// // Create a EC2Metadata client from just a session.
// svc := ec2metadata.New(mySession)
//
// // Create a EC2Metadata client with additional configuration
// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
c := p.ClientConfig(ServiceName, cfgs...)
return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// NewClient returns a new EC2Metadata client. Should be used to create
// a client when not using a session. Generally using just New with a session
// is preferred.
func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
// If the default http client is provided, replace it with a custom
// client using default timeouts.
if cfg.HTTPClient == http.DefaultClient {
cfg.HTTPClient = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
// use a shorter timeout than default because the metadata
// service is local if it is running, and to fail faster
// if not running on an ec2 instance.
Timeout: 5 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
},
}
}
svc := &EC2Metadata{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
Endpoint: endpoint,
APIVersion: "latest",
},
handlers,
),
}
svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
svc.Handlers.UnmarshalError.PushBack(unmarshalError)
svc.Handlers.Validate.Clear()
svc.Handlers.Validate.PushBack(validateEndpointHandler)
// Add additional options to the service config
for _, option := range opts {
option(svc.Client)
}
return svc
}
type metadataOutput struct {
Content string
}
func unmarshalHandler(r *request.Request) {
defer r.HTTPResponse.Body.Close()
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
}
data := r.Data.(*metadataOutput)
data.Content = string(b)
}
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
_, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
}
// TODO extract the error...
}
func validateEndpointHandler(r *request.Request) {
if r.ClientInfo.Endpoint == "" {
r.Error = aws.ErrMissingEndpoint
}
}

View File

@@ -0,0 +1,17 @@
package aws
import "github.com/aws/aws-sdk-go/aws/awserr"
var (
// ErrMissingRegion is an error that is returned if region configuration is
// not found.
//
// @readonly
ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
// resolved for a service.
//
// @readonly
ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)

View File

@@ -0,0 +1,98 @@
package aws
import (
"log"
"os"
)
// A LogLevelType defines the level logging should be performed at. Used to instruct
// the SDK which statements should be logged.
type LogLevelType uint
// LogLevel returns the pointer to a LogLevel. Should be used to work around
// not being able to take the address of a non-composite literal.
func LogLevel(l LogLevelType) *LogLevelType {
return &l
}
// Value returns the LogLevel value or the default value LogOff if the LogLevel
// is nil. Safe to use on nil value LogLevelTypes.
func (l *LogLevelType) Value() LogLevelType {
if l != nil {
return *l
}
return LogOff
}
// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
// LogLevel is nil, will default to LogOff comparison.
func (l *LogLevelType) Matches(v LogLevelType) bool {
c := l.Value()
return c&v == v
}
// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
// to LogOff comparison.
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
c := l.Value()
return c >= v
}
const (
// LogOff states that no logging should be performed by the SDK. This is the
// default state of the SDK, and should be used to disable all logging.
LogOff LogLevelType = iota * 0x1000
// LogDebug states that debug output should be logged by the SDK. This should
// be used to inspect requests made and responses received.
LogDebug
)
// Debug Logging Sub Levels
const (
// LogDebugWithSigning states that the SDK should log request signing and
// presigning events. This should be used to log the signing details of
// requests for debugging. Will also enable LogDebug.
LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
// LogDebugWithHTTPBody states the SDK should log HTTP request and response
// bodies in addition to the headers and path. This should be used to
// see the body content of requests and responses made while using the SDK.
// Will also enable LogDebug.
LogDebugWithHTTPBody
// LogDebugWithRequestRetries states the SDK should log when service requests will
// be retried. This should be used to log when service requests are being
// retried. Will also enable LogDebug.
LogDebugWithRequestRetries
// LogDebugWithRequestErrors states the SDK should log when service requests fail
// to build, send, validate, or unmarshal.
LogDebugWithRequestErrors
)
// A Logger is a minimalistic interface for the SDK to log messages to. Should
// be used to provide custom logging writers for the SDK to use.
type Logger interface {
Log(...interface{})
}
// NewDefaultLogger returns a Logger which will write log messages to stdout, and
// use the same formatting runes as the stdlib log.Logger.
func NewDefaultLogger() Logger {
return &defaultLogger{
logger: log.New(os.Stdout, "", log.LstdFlags),
}
}
// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
type defaultLogger struct {
logger *log.Logger
}
// Log logs the parameters to the stdlib logger. See log.Println.
func (l defaultLogger) Log(args ...interface{}) {
l.logger.Println(args...)
}
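
A hypothetical sketch of enabling debug logging with the levels above; the ec2metadata client is used only because it appears elsewhere in this changeset:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/ec2metadata"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // LogDebugWithHTTPBody already implies LogDebug; the explicit OR is
    // shown only for clarity.
    cfg := aws.NewConfig().
        WithLogger(aws.NewDefaultLogger()).
        WithLogLevel(aws.LogDebug | aws.LogDebugWithHTTPBody)
    svc := ec2metadata.New(session.New(), cfg)
    _ = svc // requests made by svc are now logged with their HTTP bodies
}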

View File

@@ -0,0 +1,140 @@
package request
import (
"fmt"
"strings"
)
// A Handlers provides a collection of request handlers for various
// stages of handling requests.
type Handlers struct {
Validate HandlerList
Build HandlerList
Sign HandlerList
Send HandlerList
ValidateResponse HandlerList
Unmarshal HandlerList
UnmarshalMeta HandlerList
UnmarshalError HandlerList
Retry HandlerList
AfterRetry HandlerList
}
// Copy returns a copy of this handler's lists.
func (h *Handlers) Copy() Handlers {
return Handlers{
Validate: h.Validate.copy(),
Build: h.Build.copy(),
Sign: h.Sign.copy(),
Send: h.Send.copy(),
ValidateResponse: h.ValidateResponse.copy(),
Unmarshal: h.Unmarshal.copy(),
UnmarshalError: h.UnmarshalError.copy(),
UnmarshalMeta: h.UnmarshalMeta.copy(),
Retry: h.Retry.copy(),
AfterRetry: h.AfterRetry.copy(),
}
}
// Clear removes callback functions for all handlers
func (h *Handlers) Clear() {
h.Validate.Clear()
h.Build.Clear()
h.Send.Clear()
h.Sign.Clear()
h.Unmarshal.Clear()
h.UnmarshalMeta.Clear()
h.UnmarshalError.Clear()
h.ValidateResponse.Clear()
h.Retry.Clear()
h.AfterRetry.Clear()
}
// A HandlerList manages zero or more handlers in a list.
type HandlerList struct {
list []NamedHandler
}
// A NamedHandler is a struct that contains a name and function callback.
type NamedHandler struct {
Name string
Fn func(*Request)
}
// copy creates a copy of the handler list.
func (l *HandlerList) copy() HandlerList {
var n HandlerList
n.list = append([]NamedHandler{}, l.list...)
return n
}
// Clear clears the handler list.
func (l *HandlerList) Clear() {
l.list = []NamedHandler{}
}
// Len returns the number of handlers in the list.
func (l *HandlerList) Len() int {
return len(l.list)
}
// PushBack pushes handler f to the back of the handler list.
func (l *HandlerList) PushBack(f func(*Request)) {
l.list = append(l.list, NamedHandler{"__anonymous", f})
}
// PushFront pushes handler f to the front of the handler list.
func (l *HandlerList) PushFront(f func(*Request)) {
l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
}
// PushBackNamed pushes the named handler n to the back of the handler list.
func (l *HandlerList) PushBackNamed(n NamedHandler) {
l.list = append(l.list, n)
}
// PushFrontNamed pushes the named handler n to the front of the handler list.
func (l *HandlerList) PushFrontNamed(n NamedHandler) {
l.list = append([]NamedHandler{n}, l.list...)
}
// Remove removes a NamedHandler n
func (l *HandlerList) Remove(n NamedHandler) {
newlist := []NamedHandler{}
for _, m := range l.list {
if m.Name != n.Name {
newlist = append(newlist, m)
}
}
l.list = newlist
}
// Run executes all handlers in the list with a given request object.
func (l *HandlerList) Run(r *Request) {
for _, f := range l.list {
f.Fn(r)
}
}
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
// header. If the extra parameters are provided they will be added as metadata to the
// name/version pair resulting in the following format.
// "name/version (extra0; extra1; ...)"
// The user agent part will be concatenated with this current request's user agent string.
func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
ua := fmt.Sprintf("%s/%s", name, version)
if len(extra) > 0 {
ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
}
return func(r *Request) {
AddToUserAgent(r, ua)
}
}
// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
// The input string will be concatenated with the current request's user agent string.
func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
return func(r *Request) {
AddToUserAgent(r, s)
}
}
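
A hypothetical sketch of registering a named handler with a client, using MakeAddToUserAgentHandler from above (the handler name and version string are placeholders):

package main

import (
    "github.com/aws/aws-sdk-go/aws/ec2metadata"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    svc := ec2metadata.New(session.New())
    // A named handler can later be removed with Handlers.Build.Remove.
    svc.Handlers.Build.PushBackNamed(request.NamedHandler{
        Name: "example.user-agent",
        Fn:   request.MakeAddToUserAgentHandler("grafana", "2.6.0-beta1"),
    })
    _ = svc
}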

View File

@@ -0,0 +1,47 @@
package request_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
func TestHandlerList(t *testing.T) {
s := ""
r := &request.Request{}
l := request.HandlerList{}
l.PushBack(func(r *request.Request) {
s += "a"
r.Data = s
})
l.Run(r)
assert.Equal(t, "a", s)
assert.Equal(t, "a", r.Data)
}
func TestMultipleHandlers(t *testing.T) {
r := &request.Request{}
l := request.HandlerList{}
l.PushBack(func(r *request.Request) { r.Data = nil })
l.PushFront(func(r *request.Request) { r.Data = aws.Bool(true) })
l.Run(r)
if r.Data != nil {
t.Error("Expected handler to execute")
}
}
func TestNamedHandlers(t *testing.T) {
l := request.HandlerList{}
named := request.NamedHandler{Name: "Name", Fn: func(r *request.Request) {}}
named2 := request.NamedHandler{Name: "NotName", Fn: func(r *request.Request) {}}
l.PushBackNamed(named)
l.PushBackNamed(named)
l.PushBackNamed(named2)
l.PushBack(func(r *request.Request) {})
assert.Equal(t, 4, l.Len())
l.Remove(named)
assert.Equal(t, 2, l.Len())
}

View File

@@ -0,0 +1,279 @@
package request
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client/metadata"
)
// A Request is the service request to be made.
type Request struct {
Config aws.Config
ClientInfo metadata.ClientInfo
Handlers Handlers
Retryer
Time time.Time
ExpireTime time.Duration
Operation *Operation
HTTPRequest *http.Request
HTTPResponse *http.Response
Body io.ReadSeeker
BodyStart int64 // offset from beginning of Body that the request body starts
Params interface{}
Error error
Data interface{}
RequestID string
RetryCount int
Retryable *bool
RetryDelay time.Duration
built bool
}
// An Operation is the service API operation to be made.
type Operation struct {
Name string
HTTPMethod string
HTTPPath string
*Paginator
}
// Paginator keeps track of pagination configuration for an API operation.
type Paginator struct {
InputTokens []string
OutputTokens []string
LimitToken string
TruncationToken string
}
// New returns a new Request pointer for the service API
// operation and parameters.
//
// Params is any value of input parameters to be the request payload.
// Data is pointer value to an object which the request's response
// payload will be deserialized to.
func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
method := operation.HTTPMethod
if method == "" {
method = "POST"
}
p := operation.HTTPPath
if p == "" {
p = "/"
}
httpReq, _ := http.NewRequest(method, "", nil)
httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p)
r := &Request{
Config: cfg,
ClientInfo: clientInfo,
Handlers: handlers.Copy(),
Retryer: retryer,
Time: time.Now(),
ExpireTime: 0,
Operation: operation,
HTTPRequest: httpReq,
Body: nil,
Params: params,
Error: nil,
Data: data,
}
r.SetBufferBody([]byte{})
return r
}
// WillRetry returns if the request can be retried.
func (r *Request) WillRetry() bool {
return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
}
// ParamsFilled returns if the request's parameters have been populated
// and the parameters are valid. False is returned if no parameters are
// provided or invalid.
func (r *Request) ParamsFilled() bool {
return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
}
// DataFilled returns true if the request's data for response deserialization
// target has been set and is valid. False is returned if data is not
// set, or is invalid.
func (r *Request) DataFilled() bool {
return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
}
// SetBufferBody will set the request's body bytes that will be sent to
// the service API.
func (r *Request) SetBufferBody(buf []byte) {
r.SetReaderBody(bytes.NewReader(buf))
}
// SetStringBody sets the body of the request to be backed by a string.
func (r *Request) SetStringBody(s string) {
r.SetReaderBody(strings.NewReader(s))
}
// SetReaderBody will set the request's body reader.
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
r.HTTPRequest.Body = ioutil.NopCloser(reader)
r.Body = reader
}
// Presign returns the request's signed URL. Error will be returned
// if the signing fails.
func (r *Request) Presign(expireTime time.Duration) (string, error) {
r.ExpireTime = expireTime
r.Sign()
if r.Error != nil {
return "", r.Error
}
return r.HTTPRequest.URL.String(), nil
}
func debugLogReqError(r *Request, stage string, retrying bool, err error) {
if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
return
}
retryStr := "not retrying"
if retrying {
retryStr = "will retry"
}
r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
}
// Build will build the request's object so it can be signed and sent
// to the service. Build will also validate all the request's parameters.
// Any additional build Handlers set on this request will be run
// in the order they were set.
//
// The request will only be built once. Multiple calls to build will have
// no effect.
//
// If any Validate or Build errors occur the build will stop and the error
// which occurred will be returned.
func (r *Request) Build() error {
if !r.built {
r.Error = nil
r.Handlers.Validate.Run(r)
if r.Error != nil {
debugLogReqError(r, "Validate Request", false, r.Error)
return r.Error
}
r.Handlers.Build.Run(r)
r.built = true
}
return r.Error
}
// Sign will sign the request, returning an error if errors are encountered.
//
// Send will build the request prior to signing. All Sign Handlers will
// be executed in the order they were set.
func (r *Request) Sign() error {
r.Build()
if r.Error != nil {
debugLogReqError(r, "Build Request", false, r.Error)
return r.Error
}
r.Handlers.Sign.Run(r)
return r.Error
}
// Send will send the request returning error if errors are encountered.
//
// Send will sign the request prior to sending. All Send Handlers will
// be executed in the order they were set.
func (r *Request) Send() error {
for {
r.Sign()
if r.Error != nil {
return r.Error
}
if aws.BoolValue(r.Retryable) {
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
}
// Re-seek the body back to the original point for a retry so that
// send will send the body's contents again in the upcoming request.
r.Body.Seek(r.BodyStart, 0)
r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
}
r.Retryable = nil
r.Handlers.Send.Run(r)
if r.Error != nil {
err := r.Error
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Send Request", false, r.Error)
return r.Error
}
debugLogReqError(r, "Send Request", true, err)
continue
}
r.Handlers.UnmarshalMeta.Run(r)
r.Handlers.ValidateResponse.Run(r)
if r.Error != nil {
err := r.Error
r.Handlers.UnmarshalError.Run(r)
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Validate Response", false, r.Error)
return r.Error
}
debugLogReqError(r, "Validate Response", true, err)
continue
}
r.Handlers.Unmarshal.Run(r)
if r.Error != nil {
err := r.Error
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Unmarshal Response", false, r.Error)
return r.Error
}
debugLogReqError(r, "Unmarshal Response", true, err)
continue
}
break
}
return nil
}
// AddToUserAgent adds the string to the end of the request's current user agent.
func AddToUserAgent(r *Request, s string) {
curUA := r.HTTPRequest.Header.Get("User-Agent")
if len(curUA) > 0 {
s = curUA + " " + s
}
r.HTTPRequest.Header.Set("User-Agent", s)
}
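
Service clients build Request values through New and drive them with Build, Sign, and Send. A hypothetical sketch of the Presign path above, using an S3 client (bucket, key, and region are placeholders, and the S3 API is assumed rather than part of this changeset):

package main

import (
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
    req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("example-key"),
    })
    // Presign builds and signs the request without sending it.
    url, err := req.Presign(15 * time.Minute)
    if err != nil {
        fmt.Println("presign failed:", err)
        return
    }
    fmt.Println(url)
}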

View File

@@ -0,0 +1,96 @@
package request
import (
"reflect"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
)
//type Paginater interface {
// HasNextPage() bool
// NextPage() *Request
// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
//}
// HasNextPage returns true if this request has more pages of data available.
func (r *Request) HasNextPage() bool {
return len(r.nextPageTokens()) > 0
}
// nextPageTokens returns the tokens to use when asking for the next page of
// data.
func (r *Request) nextPageTokens() []interface{} {
if r.Operation.Paginator == nil {
return nil
}
if r.Operation.TruncationToken != "" {
tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
if len(tr) == 0 {
return nil
}
switch v := tr[0].(type) {
case *bool:
if !aws.BoolValue(v) {
return nil
}
case bool:
if !v {
return nil
}
}
}
tokens := []interface{}{}
for _, outToken := range r.Operation.OutputTokens {
v, _ := awsutil.ValuesAtPath(r.Data, outToken)
if len(v) > 0 {
tokens = append(tokens, v[0])
}
}
return tokens
}
// NextPage returns a new Request that can be executed to return the next
// page of result data. Call .Send() on this request to execute it.
func (r *Request) NextPage() *Request {
tokens := r.nextPageTokens()
if len(tokens) == 0 {
return nil
}
data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
for i, intok := range nr.Operation.InputTokens {
awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
}
return nr
}
// EachPage iterates over each page of a paginated request object. The fn
// parameter should be a function with the following sample signature:
//
// func(page *T, lastPage bool) bool {
// return true // return false to stop iterating
// }
//
// Where "T" is the structure type matching the output structure of the given
// operation. For example, a request object generated by
// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
// as the structure "T". The lastPage value represents whether the page is
// the last page of data or not. The function should return true to keep
// iterating, or false to stop.
func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
for page := r; page != nil; page = page.NextPage() {
page.Send()
shouldContinue := fn(page.Data, !page.HasNextPage())
if page.Error != nil || !shouldContinue {
return page.Error
}
}
return nil
}
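
A hypothetical sketch of pagination driven by the machinery above; the generated *Pages helpers (here s3.ListObjectsPages, as also exercised in the tests that follow) wrap EachPage:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
    err := svc.ListObjectsPages(&s3.ListObjectsInput{Bucket: aws.String("example-bucket")},
        func(page *s3.ListObjectsOutput, lastPage bool) bool {
            fmt.Println(len(page.Contents), "objects on this page")
            return true // returning false stops pagination early
        })
    if err != nil {
        fmt.Println(err)
    }
}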

View File

@@ -0,0 +1,392 @@
package request_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting/unit"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/s3"
)
// Use DynamoDB methods for simplicity
func TestPaginationQueryPage(t *testing.T) {
db := dynamodb.New(unit.Session)
tokens, pages, numPages, gotToEnd := []map[string]*dynamodb.AttributeValue{}, []map[string]*dynamodb.AttributeValue{}, 0, false
reqNum := 0
resps := []*dynamodb.QueryOutput{
{
LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key1")}},
Count: aws.Int64(1),
Items: []map[string]*dynamodb.AttributeValue{
map[string]*dynamodb.AttributeValue{
"key": {S: aws.String("key1")},
},
},
},
{
LastEvaluatedKey: map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key2")}},
Count: aws.Int64(1),
Items: []map[string]*dynamodb.AttributeValue{
map[string]*dynamodb.AttributeValue{
"key": {S: aws.String("key2")},
},
},
},
{
LastEvaluatedKey: map[string]*dynamodb.AttributeValue{},
Count: aws.Int64(1),
Items: []map[string]*dynamodb.AttributeValue{
map[string]*dynamodb.AttributeValue{
"key": {S: aws.String("key3")},
},
},
},
}
db.Handlers.Send.Clear() // mock sending
db.Handlers.Unmarshal.Clear()
db.Handlers.UnmarshalMeta.Clear()
db.Handlers.ValidateResponse.Clear()
db.Handlers.Build.PushBack(func(r *request.Request) {
in := r.Params.(*dynamodb.QueryInput)
if in == nil {
tokens = append(tokens, nil)
} else if len(in.ExclusiveStartKey) != 0 {
tokens = append(tokens, in.ExclusiveStartKey)
}
})
db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
r.Data = resps[reqNum]
reqNum++
})
params := &dynamodb.QueryInput{
Limit: aws.Int64(2),
TableName: aws.String("tablename"),
}
err := db.QueryPages(params, func(p *dynamodb.QueryOutput, last bool) bool {
numPages++
for _, item := range p.Items {
pages = append(pages, item)
}
if last {
if gotToEnd {
assert.Fail(t, "last=true happened twice")
}
gotToEnd = true
}
return true
})
assert.Nil(t, err)
assert.Equal(t,
[]map[string]*dynamodb.AttributeValue{
map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key1")}},
map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key2")}},
}, tokens)
assert.Equal(t,
[]map[string]*dynamodb.AttributeValue{
map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key1")}},
map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key2")}},
map[string]*dynamodb.AttributeValue{"key": {S: aws.String("key3")}},
}, pages)
assert.Equal(t, 3, numPages)
assert.True(t, gotToEnd)
assert.Nil(t, params.ExclusiveStartKey)
}
// Use DynamoDB methods for simplicity
func TestPagination(t *testing.T) {
db := dynamodb.New(unit.Session)
tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false
reqNum := 0
resps := []*dynamodb.ListTablesOutput{
{TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
{TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
{TableNames: []*string{aws.String("Table5")}},
}
db.Handlers.Send.Clear() // mock sending
db.Handlers.Unmarshal.Clear()
db.Handlers.UnmarshalMeta.Clear()
db.Handlers.ValidateResponse.Clear()
db.Handlers.Build.PushBack(func(r *request.Request) {
in := r.Params.(*dynamodb.ListTablesInput)
if in == nil {
tokens = append(tokens, "")
} else if in.ExclusiveStartTableName != nil {
tokens = append(tokens, *in.ExclusiveStartTableName)
}
})
db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
r.Data = resps[reqNum]
reqNum++
})
params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool {
numPages++
for _, t := range p.TableNames {
pages = append(pages, *t)
}
if last {
if gotToEnd {
assert.Fail(t, "last=true happened twice")
}
gotToEnd = true
}
return true
})
assert.Equal(t, []string{"Table2", "Table4"}, tokens)
assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages)
assert.Equal(t, 3, numPages)
assert.True(t, gotToEnd)
assert.Nil(t, err)
assert.Nil(t, params.ExclusiveStartTableName)
}
// Use DynamoDB methods for simplicity
func TestPaginationEachPage(t *testing.T) {
db := dynamodb.New(unit.Session)
tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false
reqNum := 0
resps := []*dynamodb.ListTablesOutput{
{TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
{TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
{TableNames: []*string{aws.String("Table5")}},
}
db.Handlers.Send.Clear() // mock sending
db.Handlers.Unmarshal.Clear()
db.Handlers.UnmarshalMeta.Clear()
db.Handlers.ValidateResponse.Clear()
db.Handlers.Build.PushBack(func(r *request.Request) {
in := r.Params.(*dynamodb.ListTablesInput)
if in == nil {
tokens = append(tokens, "")
} else if in.ExclusiveStartTableName != nil {
tokens = append(tokens, *in.ExclusiveStartTableName)
}
})
db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
r.Data = resps[reqNum]
reqNum++
})
params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
req, _ := db.ListTablesRequest(params)
err := req.EachPage(func(p interface{}, last bool) bool {
numPages++
for _, t := range p.(*dynamodb.ListTablesOutput).TableNames {
pages = append(pages, *t)
}
if last {
if gotToEnd {
assert.Fail(t, "last=true happened twice")
}
gotToEnd = true
}
return true
})
assert.Equal(t, []string{"Table2", "Table4"}, tokens)
assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages)
assert.Equal(t, 3, numPages)
assert.True(t, gotToEnd)
assert.Nil(t, err)
}
// Use DynamoDB methods for simplicity
func TestPaginationEarlyExit(t *testing.T) {
db := dynamodb.New(unit.Session)
numPages, gotToEnd := 0, false
reqNum := 0
resps := []*dynamodb.ListTablesOutput{
{TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
{TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
{TableNames: []*string{aws.String("Table5")}},
}
db.Handlers.Send.Clear() // mock sending
db.Handlers.Unmarshal.Clear()
db.Handlers.UnmarshalMeta.Clear()
db.Handlers.ValidateResponse.Clear()
db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
r.Data = resps[reqNum]
reqNum++
})
params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool {
numPages++
if numPages == 2 {
return false
}
if last {
if gotToEnd {
assert.Fail(t, "last=true happened twice")
}
gotToEnd = true
}
return true
})
assert.Equal(t, 2, numPages)
assert.False(t, gotToEnd)
assert.Nil(t, err)
}
func TestSkipPagination(t *testing.T) {
client := s3.New(unit.Session)
client.Handlers.Send.Clear() // mock sending
client.Handlers.Unmarshal.Clear()
client.Handlers.UnmarshalMeta.Clear()
client.Handlers.ValidateResponse.Clear()
client.Handlers.Unmarshal.PushBack(func(r *request.Request) {
r.Data = &s3.HeadBucketOutput{}
})
req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")})
numPages, gotToEnd := 0, false
req.EachPage(func(p interface{}, last bool) bool {
numPages++
if last {
gotToEnd = true
}
return true
})
assert.Equal(t, 1, numPages)
assert.True(t, gotToEnd)
}
// Use S3 for simplicity
func TestPaginationTruncation(t *testing.T) {
client := s3.New(unit.Session)
reqNum := 0
resps := []*s3.ListObjectsOutput{
{IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}},
{IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}},
{IsTruncated: aws.Bool(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}},
{IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}},
}
client.Handlers.Send.Clear() // mock sending
client.Handlers.Unmarshal.Clear()
client.Handlers.UnmarshalMeta.Clear()
client.Handlers.ValidateResponse.Clear()
client.Handlers.Unmarshal.PushBack(func(r *request.Request) {
r.Data = resps[reqNum]
reqNum++
})
params := &s3.ListObjectsInput{Bucket: aws.String("bucket")}
results := []string{}
err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
results = append(results, *p.Contents[0].Key)
return true
})
assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results)
assert.Nil(t, err)
// Try again without truncation token at all
reqNum = 0
resps[1].IsTruncated = nil
resps[2].IsTruncated = aws.Bool(true)
results = []string{}
err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
results = append(results, *p.Contents[0].Key)
return true
})
assert.Equal(t, []string{"Key1", "Key2"}, results)
assert.Nil(t, err)
}
// Benchmarks
var benchResps = []*dynamodb.ListTablesOutput{
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
{TableNames: []*string{aws.String("TABLE")}},
}
var benchDb = func() *dynamodb.DynamoDB {
db := dynamodb.New(unit.Session)
db.Handlers.Send.Clear() // mock sending
db.Handlers.Unmarshal.Clear()
db.Handlers.UnmarshalMeta.Clear()
db.Handlers.ValidateResponse.Clear()
return db
}
func BenchmarkCodegenIterator(b *testing.B) {
reqNum := 0
db := benchDb()
db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
r.Data = benchResps[reqNum]
reqNum++
})
input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
iter := func(fn func(*dynamodb.ListTablesOutput, bool) bool) error {
page, _ := db.ListTablesRequest(input)
for ; page != nil; page = page.NextPage() {
page.Send()
out := page.Data.(*dynamodb.ListTablesOutput)
if result := fn(out, !page.HasNextPage()); page.Error != nil || !result {
return page.Error
}
}
return nil
}
for i := 0; i < b.N; i++ {
reqNum = 0
iter(func(p *dynamodb.ListTablesOutput, last bool) bool {
return true
})
}
}
func BenchmarkEachPageIterator(b *testing.B) {
reqNum := 0
db := benchDb()
db.Handlers.Unmarshal.PushBack(func(r *request.Request) {
r.Data = benchResps[reqNum]
reqNum++
})
input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
for i := 0; i < b.N; i++ {
reqNum = 0
req, _ := db.ListTablesRequest(input)
req.EachPage(func(p interface{}, last bool) bool {
return true
})
}
}

View File

@@ -0,0 +1,261 @@
package request_test
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"runtime"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting"
)
type testData struct {
Data string
}
func body(str string) io.ReadCloser {
return ioutil.NopCloser(bytes.NewReader([]byte(str)))
}
func unmarshal(req *request.Request) {
defer req.HTTPResponse.Body.Close()
if req.Data != nil {
json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data)
}
return
}
func unmarshalError(req *request.Request) {
bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
if err != nil {
req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err)
return
}
if len(bodyBytes) == 0 {
req.Error = awserr.NewRequestFailure(
awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")),
req.HTTPResponse.StatusCode,
"",
)
return
}
var jsonErr jsonErrorResponse
if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err)
return
}
req.Error = awserr.NewRequestFailure(
awserr.New(jsonErr.Code, jsonErr.Message, nil),
req.HTTPResponse.StatusCode,
"",
)
}
type jsonErrorResponse struct {
Code string `json:"__type"`
Message string `json:"message"`
}
// test that retries occur for 5xx status codes
func TestRequestRecoverRetry5xx(t *testing.T) {
reqNum := 0
reqs := []http.Response{
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 501, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
}
s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &reqs[reqNum]
reqNum++
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
assert.Nil(t, err)
assert.Equal(t, 2, int(r.RetryCount))
assert.Equal(t, "valid", out.Data)
}
// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry`
func TestRequestRecoverRetry4xxRetryable(t *testing.T) {
reqNum := 0
reqs := []http.Response{
{StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate exceeded."}`)},
{StatusCode: 429, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)},
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
}
s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &reqs[reqNum]
reqNum++
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
assert.Nil(t, err)
assert.Equal(t, 2, int(r.RetryCount))
assert.Equal(t, "valid", out.Data)
}
// test that retries don't occur for 4xx status codes with a response type that can't be retried
func TestRequest4xxUnretryable(t *testing.T) {
s := awstesting.NewClient(aws.NewConfig().WithMaxRetries(10))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)}
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
assert.NotNil(t, err)
if e, ok := err.(awserr.RequestFailure); ok {
assert.Equal(t, 401, e.StatusCode())
} else {
assert.Fail(t, "Expected error to be a service failure")
}
assert.Equal(t, "SignatureDoesNotMatch", err.(awserr.Error).Code())
assert.Equal(t, "Signature does not match.", err.(awserr.Error).Message())
assert.Equal(t, 0, int(r.RetryCount))
}
func TestRequestExhaustRetries(t *testing.T) {
delays := []time.Duration{}
sleepDelay := func(delay time.Duration) {
delays = append(delays, delay)
}
reqNum := 0
reqs := []http.Response{
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
}
s := awstesting.NewClient(aws.NewConfig().WithSleepDelay(sleepDelay))
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &reqs[reqNum]
reqNum++
})
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
err := r.Send()
assert.NotNil(t, err)
if e, ok := err.(awserr.RequestFailure); ok {
assert.Equal(t, 500, e.StatusCode())
} else {
assert.Fail(t, "Expected error to be a service failure")
}
assert.Equal(t, "UnknownError", err.(awserr.Error).Code())
assert.Equal(t, "An error occurred.", err.(awserr.Error).Message())
assert.Equal(t, 3, int(r.RetryCount))
expectDelays := []struct{ min, max time.Duration }{{30, 59}, {60, 118}, {120, 236}}
for i, v := range delays {
min := expectDelays[i].min * time.Millisecond
max := expectDelays[i].max * time.Millisecond
assert.True(t, min <= v && v <= max,
"Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max)
}
}
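// The delay windows asserted above (30-59ms, 60-118ms, 120-236ms) are consistent
// with an exponential backoff of roughly (rand.Intn(30)+30) * 2^retryCount
// milliseconds. The sketch below is an illustrative assumption for clarity, not
// the SDK's actual DefaultRetryer; it presumes "math/rand" and "time" are imported.
func backoffDelaySketch(retryCount int) time.Duration {
    // Base jitter of 30-59ms, doubled for every retry already attempted.
    base := time.Duration(rand.Intn(30)+30) * time.Millisecond
    return base * time.Duration(1<<uint(retryCount))
}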
// test that the request is retried after the credentials are expired.
func TestRequestRecoverExpiredCreds(t *testing.T) {
reqNum := 0
reqs := []http.Response{
{StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)},
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
}
s := awstesting.NewClient(&aws.Config{MaxRetries: aws.Int(10), Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")})
s.Handlers.Validate.Clear()
s.Handlers.Unmarshal.PushBack(unmarshal)
s.Handlers.UnmarshalError.PushBack(unmarshalError)
credExpiredBeforeRetry := false
credExpiredAfterRetry := false
s.Handlers.AfterRetry.PushBack(func(r *request.Request) {
credExpiredAfterRetry = r.Config.Credentials.IsExpired()
})
s.Handlers.Sign.Clear()
s.Handlers.Sign.PushBack(func(r *request.Request) {
r.Config.Credentials.Get()
})
s.Handlers.Send.Clear() // mock sending
s.Handlers.Send.PushBack(func(r *request.Request) {
r.HTTPResponse = &reqs[reqNum]
reqNum++
})
out := &testData{}
r := s.NewRequest(&request.Operation{Name: "Operation"}, nil, out)
err := r.Send()
assert.Nil(t, err)
assert.False(t, credExpiredBeforeRetry, "Expect valid creds before retry check")
assert.True(t, credExpiredAfterRetry, "Expect expired creds after retry check")
assert.False(t, s.Config.Credentials.IsExpired(), "Expect valid creds after cred expired recovery")
assert.Equal(t, 1, int(r.RetryCount))
assert.Equal(t, "valid", out.Data)
}
func TestMakeAddtoUserAgentHandler(t *testing.T) {
fn := request.MakeAddToUserAgentHandler("name", "version", "extra1", "extra2")
r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}}
r.HTTPRequest.Header.Set("User-Agent", "foo/bar")
fn(r)
assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent"))
}
func TestMakeAddtoUserAgentFreeFormHandler(t *testing.T) {
fn := request.MakeAddToUserAgentFreeFormHandler("name/version (extra1; extra2)")
r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}}
r.HTTPRequest.Header.Set("User-Agent", "foo/bar")
fn(r)
assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent"))
}
func TestRequestUserAgent(t *testing.T) {
s := awstesting.NewClient(&aws.Config{Region: aws.String("us-east-1")})
// s.Handlers.Validate.Clear()
req := s.NewRequest(&request.Operation{Name: "Operation"}, nil, &testData{})
req.HTTPRequest.Header.Set("User-Agent", "foo/bar")
assert.NoError(t, req.Build())
expectUA := fmt.Sprintf("foo/bar %s/%s (%s; %s; %s)",
aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)
assert.Equal(t, expectUA, req.HTTPRequest.Header.Get("User-Agent"))
}

View File

@@ -0,0 +1,74 @@
package request
import (
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// Retryer is an interface to control retry logic for a given service.
// The default implementation used by most services is the service.DefaultRetryer
// structure, which contains basic retry logic using exponential backoff.
type Retryer interface {
RetryRules(*Request) time.Duration
ShouldRetry(*Request) bool
MaxRetries() int
}
// retryableCodes is a collection of service response codes which are retry-able
// without any further action.
var retryableCodes = map[string]struct{}{
"RequestError": {},
"RequestTimeout": {},
"ProvisionedThroughputExceededException": {},
"Throttling": {},
"ThrottlingException": {},
"RequestLimitExceeded": {},
"RequestThrottled": {},
"LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
"TooManyRequestsException": {}, // Lambda functions
}
// credsExpiredCodes is a collection of error codes which signify the credentials
// need to be refreshed. Expired tokens require refreshing of credentials, and
// resigning before the request can be retried.
var credsExpiredCodes = map[string]struct{}{
"ExpiredToken": {},
"ExpiredTokenException": {},
"RequestExpired": {}, // EC2 Only
}
func isCodeRetryable(code string) bool {
if _, ok := retryableCodes[code]; ok {
return true
}
return isCodeExpiredCreds(code)
}
func isCodeExpiredCreds(code string) bool {
_, ok := credsExpiredCodes[code]
return ok
}
// IsErrorRetryable returns whether the error is retryable, based on its Code.
// Returns false if the request has no Error set.
func (r *Request) IsErrorRetryable() bool {
if r.Error != nil {
if err, ok := r.Error.(awserr.Error); ok {
return isCodeRetryable(err.Code())
}
}
return false
}
// IsErrorExpired returns whether the error code is a credential expiry error.
// Returns false if the request has no Error set.
func (r *Request) IsErrorExpired() bool {
if r.Error != nil {
if err, ok := r.Error.(awserr.Error); ok {
return isCodeExpiredCreds(err.Code())
}
}
return false
}
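A service client's retry behaviour can be customized by supplying a value that satisfies the Retryer interface above. The following is a minimal sketch under the assumption that this package is importable as github.com/aws/aws-sdk-go/aws/request; the FixedRetryer name and its fixed-delay policy are illustrative only.
package main

import (
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws/request"
)

// FixedRetryer is a hypothetical Retryer: a fixed delay per attempt and a
// small retry budget.
type FixedRetryer struct {
    NumMaxRetries int
    Delay         time.Duration
}

// RetryRules returns the same delay for every attempt.
func (d FixedRetryer) RetryRules(r *request.Request) time.Duration { return d.Delay }

// ShouldRetry retries on 5xx responses and on retryable error codes
// (see IsErrorRetryable above).
func (d FixedRetryer) ShouldRetry(r *request.Request) bool {
    if r.HTTPResponse != nil && r.HTTPResponse.StatusCode >= 500 {
        return true
    }
    return r.IsErrorRetryable()
}

// MaxRetries returns the retry budget.
func (d FixedRetryer) MaxRetries() int { return d.NumMaxRetries }

// Compile-time check that FixedRetryer satisfies the Retryer interface.
var _ request.Retryer = FixedRetryer{}

func main() {
    r := FixedRetryer{NumMaxRetries: 3, Delay: 100 * time.Millisecond}
    fmt.Println(r.MaxRetries(), r.RetryRules(nil))
}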

View File

@@ -0,0 +1,105 @@
// Package session provides a way to create service clients with shared configuration
// and handlers.
package session
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/endpoints"
)
// A Session provides a central location to create service clients from and
// store configurations and request handlers for those services.
//
// A Session can safely be used to create service clients concurrently, but it
// is not safe to mutate the Session concurrently.
type Session struct {
Config *aws.Config
Handlers request.Handlers
}
// New creates a new Session, merging the provided Configs on top of the SDK's
// default configuration. Once the Session is created it can be mutated to
// modify its Config or Handlers. The Session is safe to read concurrently, but
// it should not be written to concurrently.
//
// Example:
// // Create a session with the default config and request handlers.
// sess := session.New()
//
// // Create a session with a custom region
// sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
//
// // Create a session, and add additional handlers for all service
// // clients created with the session to inherit. Adds logging handler.
// sess := session.New()
// sess.Handlers.Send.PushFront(func(r *request.Request) {
// // Log every request made and its payload
// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
// })
//
// // Create a S3 client instance from a session
// sess := session.New()
// svc := s3.New(sess)
func New(cfgs ...*aws.Config) *Session {
def := defaults.Get()
s := &Session{
Config: def.Config,
Handlers: def.Handlers,
}
s.Config.MergeIn(cfgs...)
initHandlers(s)
return s
}
func initHandlers(s *Session) {
// Add the Validate parameter handler if it is not disabled.
s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
if !aws.BoolValue(s.Config.DisableParamValidation) {
s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
}
}
// Copy creates and returns a copy of the current session, copying the config
// and handlers. If any additional configs are provided they will be merged
// on top of the session's copied config.
//
// Example:
// // Create a copy of the current session, configured for the us-west-2 region.
// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
newSession := &Session{
Config: s.Config.Copy(cfgs...),
Handlers: s.Handlers.Copy(),
}
initHandlers(newSession)
return newSession
}
// ClientConfig satisfies the client.ConfigProvider interface and is used to
// configure the service client instances. Passing the Session to the service
// client's constructor (New) will use this method to configure the client.
//
// Example:
// sess := session.New()
// s3.New(sess)
func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
s = s.Copy(cfgs...)
endpoint, signingRegion := endpoints.NormalizeEndpoint(
aws.StringValue(s.Config.Endpoint), serviceName,
aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL))
return client.Config{
Config: s.Config,
Handlers: s.Handlers,
Endpoint: endpoint,
SigningRegion: signingRegion,
}
}
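A brief usage sketch of ClientConfig: the service name, region, and printed fields below are illustrative, and the import paths assume the aws and session packages shown here.
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // Shared session with a default region.
    sess := session.New(&aws.Config{Region: aws.String("us-west-2")})

    // Resolve a per-service client configuration; the extra Config is merged
    // on top of a copy of the session's Config, leaving the session untouched.
    cfg := sess.ClientConfig("s3", &aws.Config{MaxRetries: aws.Int(3)})
    fmt.Println(cfg.Endpoint, cfg.SigningRegion)
}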

View File

@@ -0,0 +1,20 @@
package session_test
import (
"net/http"
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
)
func TestNewDefaultSession(t *testing.T) {
s := session.New(&aws.Config{Region: aws.String("region")})
assert.Equal(t, "region", *s.Config.Region)
assert.Equal(t, http.DefaultClient, s.Config.HTTPClient)
assert.NotNil(t, s.Config.Logger)
assert.Equal(t, aws.LogOff, *s.Config.LogLevel)
}

View File

@@ -0,0 +1,88 @@
package aws
import (
"io"
"sync"
)
// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser.
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
return ReaderSeekerCloser{r}
}
// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
// io.Closer interfaces to the underlying object if they are available.
type ReaderSeekerCloser struct {
r io.Reader
}
// Read reads from the reader, up to the size of p. The number of bytes read
// and any error encountered are returned.
//
// If the wrapped value does not implement io.Reader, zero bytes read and a nil
// error are returned.
//
// Performs the same functionality as io.Reader's Read.
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
switch t := r.r.(type) {
case io.Reader:
return t.Read(p)
}
return 0, nil
}
// Seek sets the offset for the next Read to offset, interpreted according to
// whence: 0 means relative to the origin of the file, 1 means relative to the
// current offset, and 2 means relative to the end. Seek returns the new offset
// and an error, if any.
//
// If the underlying reader does not implement io.Seeker, nothing will be done.
func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
switch t := r.r.(type) {
case io.Seeker:
return t.Seek(offset, whence)
}
return int64(0), nil
}
// Close closes the ReaderSeekerCloser.
//
// If the underlying reader does not implement io.Closer, nothing will be done.
func (r ReaderSeekerCloser) Close() error {
switch t := r.r.(type) {
case io.Closer:
return t.Close()
}
return nil
}
// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
// interface. It can be used with the s3manager.Downloader to download content
// to a buffer in memory. Safe to use concurrently.
type WriteAtBuffer struct {
buf []byte
m sync.Mutex
}
// WriteAt writes a slice of bytes to the buffer starting at the position
// provided. The number of bytes written, or an error, is returned. Overlapping
// writes can overwrite previously written slices.
func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
b.m.Lock()
defer b.m.Unlock()
expLen := pos + int64(len(p))
if int64(len(b.buf)) < expLen {
newBuf := make([]byte, expLen)
copy(newBuf, b.buf)
b.buf = newBuf
}
copy(b.buf[pos:], p)
return len(p), nil
}
// Bytes returns a slice of bytes written to the buffer.
func (b *WriteAtBuffer) Bytes() []byte {
b.m.Lock()
defer b.m.Unlock()
return b.buf[:len(b.buf):len(b.buf)]
}
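A small usage sketch of the two helpers above, assuming this aws package is importable as github.com/aws/aws-sdk-go/aws.
package main

import (
    "fmt"
    "strings"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    // Read and Seek are delegated to the underlying strings.Reader, while
    // Close becomes a harmless no-op because strings.Reader is not an io.Closer.
    rsc := aws.ReadSeekCloser(strings.NewReader("payload"))
    p := make([]byte, 7)
    n, _ := rsc.Read(p)
    fmt.Println(n, string(p[:n])) // 7 payload

    // Sparse writes grow the buffer and zero-fill the gap between them.
    w := &aws.WriteAtBuffer{}
    w.WriteAt([]byte("world"), 6)
    w.WriteAt([]byte("hello "), 0)
    fmt.Printf("%q\n", w.Bytes()) // "hello world"
}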

View File

@@ -0,0 +1,56 @@
package aws
import (
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
)
func TestWriteAtBuffer(t *testing.T) {
b := &WriteAtBuffer{}
n, err := b.WriteAt([]byte{1}, 0)
assert.NoError(t, err)
assert.Equal(t, 1, n)
n, err = b.WriteAt([]byte{1, 1, 1}, 5)
assert.NoError(t, err)
assert.Equal(t, 3, n)
n, err = b.WriteAt([]byte{2}, 1)
assert.NoError(t, err)
assert.Equal(t, 1, n)
n, err = b.WriteAt([]byte{3}, 2)
assert.NoError(t, err)
assert.Equal(t, 1, n)
assert.Equal(t, []byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes())
}
func BenchmarkWriteAtBuffer(b *testing.B) {
buf := &WriteAtBuffer{}
r := rand.New(rand.NewSource(1))
b.ResetTimer()
for i := 0; i < b.N; i++ {
to := r.Intn(10) * 4096
bs := make([]byte, to)
buf.WriteAt(bs, r.Int63n(10)*4096)
}
}
func BenchmarkWriteAtBufferParallel(b *testing.B) {
buf := &WriteAtBuffer{}
r := rand.New(rand.NewSource(1))
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
to := r.Intn(10) * 4096
bs := make([]byte, to)
buf.WriteAt(bs, r.Int63n(10)*4096)
}
})
}

View File

@@ -0,0 +1,8 @@
// Package aws provides core functionality for making requests to AWS services.
package aws
// SDKName is the name of this AWS SDK
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "0.10.4"

View File

@@ -0,0 +1,31 @@
// Package endpoints validates regional endpoints for services.
package endpoints
//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
//go:generate gofmt -s -w endpoints_map.go
import "strings"
// EndpointForRegion returns an endpoint and its signing region for a service and region.
// If the service and region pair is not found, endpoint and signingRegion will be empty.
func EndpointForRegion(svcName, region string) (endpoint, signingRegion string) {
derivedKeys := []string{
region + "/" + svcName,
region + "/*",
"*/" + svcName,
"*/*",
}
for _, key := range derivedKeys {
if val, ok := endpointsMap.Endpoints[key]; ok {
ep := val.Endpoint
ep = strings.Replace(ep, "{region}", region, -1)
ep = strings.Replace(ep, "{service}", svcName, -1)
endpoint = ep
signingRegion = val.SigningRegion
return
}
}
return
}
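The lookup above walks the derived keys from most specific to least specific and then substitutes the {service} and {region} placeholders. Below is a self-contained sketch of the same technique, using a small local table in place of the generated endpointsMap.
package main

import (
    "fmt"
    "strings"
)

// endpointFor mirrors EndpointForRegion's lookup: exact match first, wildcard
// fallbacks after, then placeholder substitution.
func endpointFor(table map[string]string, svc, region string) string {
    for _, key := range []string{
        region + "/" + svc,
        region + "/*",
        "*/" + svc,
        "*/*",
    } {
        if ep, ok := table[key]; ok {
            ep = strings.Replace(ep, "{region}", region, -1)
            return strings.Replace(ep, "{service}", svc, -1)
        }
    }
    return ""
}

func main() {
    table := map[string]string{
        "*/*":          "{service}.{region}.amazonaws.com",
        "*/iam":        "iam.amazonaws.com",
        "us-east-1/s3": "s3.amazonaws.com",
    }
    fmt.Println(endpointFor(table, "s3", "us-east-1"))  // s3.amazonaws.com
    fmt.Println(endpointFor(table, "iam", "eu-west-1")) // iam.amazonaws.com
    fmt.Println(endpointFor(table, "ec2", "eu-west-1")) // ec2.eu-west-1.amazonaws.com
}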

View File

@@ -0,0 +1,77 @@
{
"version": 2,
"endpoints": {
"*/*": {
"endpoint": "{service}.{region}.amazonaws.com"
},
"cn-north-1/*": {
"endpoint": "{service}.{region}.amazonaws.com.cn",
"signatureVersion": "v4"
},
"us-gov-west-1/iam": {
"endpoint": "iam.us-gov.amazonaws.com"
},
"us-gov-west-1/sts": {
"endpoint": "sts.us-gov-west-1.amazonaws.com"
},
"us-gov-west-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"*/cloudfront": {
"endpoint": "cloudfront.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/cloudsearchdomain": {
"endpoint": "",
"signingRegion": "us-east-1"
},
"*/iam": {
"endpoint": "iam.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/importexport": {
"endpoint": "importexport.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/route53": {
"endpoint": "route53.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/sts": {
"endpoint": "sts.amazonaws.com",
"signingRegion": "us-east-1"
},
"us-east-1/sdb": {
"endpoint": "sdb.amazonaws.com",
"signingRegion": "us-east-1"
},
"us-east-1/s3": {
"endpoint": "s3.amazonaws.com"
},
"us-west-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"us-west-2/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"eu-west-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"ap-southeast-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"ap-southeast-2/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"ap-northeast-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"sa-east-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"eu-central-1/s3": {
"endpoint": "{service}.{region}.amazonaws.com",
"signatureVersion": "v4"
}
}
}

View File

@@ -0,0 +1,89 @@
package endpoints
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
type endpointStruct struct {
Version int
Endpoints map[string]endpointEntry
}
type endpointEntry struct {
Endpoint string
SigningRegion string
}
var endpointsMap = endpointStruct{
Version: 2,
Endpoints: map[string]endpointEntry{
"*/*": {
Endpoint: "{service}.{region}.amazonaws.com",
},
"*/cloudfront": {
Endpoint: "cloudfront.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/cloudsearchdomain": {
Endpoint: "",
SigningRegion: "us-east-1",
},
"*/iam": {
Endpoint: "iam.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/importexport": {
Endpoint: "importexport.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/route53": {
Endpoint: "route53.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/sts": {
Endpoint: "sts.amazonaws.com",
SigningRegion: "us-east-1",
},
"ap-northeast-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"ap-southeast-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"ap-southeast-2/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"cn-north-1/*": {
Endpoint: "{service}.{region}.amazonaws.com.cn",
},
"eu-central-1/s3": {
Endpoint: "{service}.{region}.amazonaws.com",
},
"eu-west-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"sa-east-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"us-east-1/s3": {
Endpoint: "s3.amazonaws.com",
},
"us-east-1/sdb": {
Endpoint: "sdb.amazonaws.com",
SigningRegion: "us-east-1",
},
"us-gov-west-1/iam": {
Endpoint: "iam.us-gov.amazonaws.com",
},
"us-gov-west-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"us-gov-west-1/sts": {
Endpoint: "sts.us-gov-west-1.amazonaws.com",
},
"us-west-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"us-west-2/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
},
}

View File

@@ -0,0 +1,28 @@
package endpoints
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestGlobalEndpoints(t *testing.T) {
region := "mock-region-1"
svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts"}
for _, name := range svcs {
ep, sr := EndpointForRegion(name, region)
assert.Equal(t, name+".amazonaws.com", ep)
assert.Equal(t, "us-east-1", sr)
}
}
func TestServicesInCN(t *testing.T) {
region := "cn-north-1"
svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3"}
for _, name := range svcs {
ep, _ := EndpointForRegion(name, region)
assert.Equal(t, name+"."+region+".amazonaws.com.cn", ep)
}
}

View File

@@ -0,0 +1,32 @@
// Package ec2query provides serialisation of AWS EC2 requests and responses.
package ec2query
//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/ec2.json build_test.go
import (
"net/url"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/internal/protocol/query/queryutil"
)
// Build builds a request for the EC2 protocol.
func Build(r *aws.Request) {
body := url.Values{
"Action": {r.Operation.Name},
"Version": {r.Service.APIVersion},
}
if err := queryutil.Parse(body, r.Params, true); err != nil {
r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err)
}
if r.ExpireTime == 0 {
r.HTTPRequest.Method = "POST"
r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
r.SetBufferBody([]byte(body.Encode()))
} else { // This is a pre-signed request
r.HTTPRequest.Method = "GET"
r.HTTPRequest.URL.RawQuery = body.Encode()
}
}
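For the non-presigned branch above, the request body is just a sorted, URL-encoded form. The minimal sketch below shows what such a body looks like; the operation and parameter names are made up for illustration, and the real parameter flattening is done by queryutil.Parse.
package main

import (
    "fmt"
    "net/url"
)

func main() {
    body := url.Values{
        "Action":  {"DescribeInstances"},
        "Version": {"2014-01-01"},
    }
    // Flattened operation parameters are added alongside Action and Version.
    body.Set("InstanceId.1", "i-12345678")

    // Encode sorts the keys, producing the x-www-form-urlencoded POST payload.
    fmt.Println(body.Encode())
    // Action=DescribeInstances&InstanceId.1=i-12345678&Version=2014-01-01
}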

View File

@@ -0,0 +1,860 @@
package ec2query_test
import (
"bytes"
"encoding/json"
"encoding/xml"
"io"
"io/ioutil"
"net/http"
"net/url"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/internal/protocol/ec2query"
"github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
"github.com/aws/aws-sdk-go/internal/signer/v4"
"github.com/aws/aws-sdk-go/internal/util"
"github.com/stretchr/testify/assert"
)
var _ bytes.Buffer // always import bytes
var _ http.Request
var _ json.Marshaler
var _ time.Time
var _ xmlutil.XMLNode
var _ xml.Attr
var _ = ioutil.Discard
var _ = util.Trim("")
var _ = url.Values{}
var _ = io.EOF
type InputService1ProtocolTest struct {
*aws.Service
}
// New returns a new InputService1ProtocolTest client.
func NewInputService1ProtocolTest(config *aws.Config) *InputService1ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "inputservice1protocoltest",
APIVersion: "2014-01-01",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &InputService1ProtocolTest{service}
}
// newRequest creates a new request for a InputService1ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opInputService1TestCaseOperation1 = "OperationName"
// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation.
func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) {
op := &aws.Operation{
Name: opInputService1TestCaseOperation1,
}
if input == nil {
input = &InputService1TestShapeInputShape{}
}
req = c.newRequest(op, input, output)
output = &InputService1TestShapeInputService1TestCaseOperation1Output{}
req.Data = output
return
}
func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) {
req, out := c.InputService1TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type InputService1TestShapeInputService1TestCaseOperation1Output struct {
metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"`
}
type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService1TestShapeInputShape struct {
Bar *string `type:"string"`
Foo *string `type:"string"`
metadataInputService1TestShapeInputShape `json:"-" xml:"-"`
}
type metadataInputService1TestShapeInputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService2ProtocolTest struct {
*aws.Service
}
// New returns a new InputService2ProtocolTest client.
func NewInputService2ProtocolTest(config *aws.Config) *InputService2ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "inputservice2protocoltest",
APIVersion: "2014-01-01",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &InputService2ProtocolTest{service}
}
// newRequest creates a new request for a InputService2ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opInputService2TestCaseOperation1 = "OperationName"
// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation.
func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputShape) (req *aws.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) {
op := &aws.Operation{
Name: opInputService2TestCaseOperation1,
}
if input == nil {
input = &InputService2TestShapeInputShape{}
}
req = c.newRequest(op, input, output)
output = &InputService2TestShapeInputService2TestCaseOperation1Output{}
req.Data = output
return
}
func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputShape) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) {
req, out := c.InputService2TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type InputService2TestShapeInputService2TestCaseOperation1Output struct {
metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"`
}
type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService2TestShapeInputShape struct {
Bar *string `locationName:"barLocationName" type:"string"`
Foo *string `type:"string"`
Yuck *string `locationName:"yuckLocationName" queryName:"yuckQueryName" type:"string"`
metadataInputService2TestShapeInputShape `json:"-" xml:"-"`
}
type metadataInputService2TestShapeInputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService3ProtocolTest struct {
*aws.Service
}
// New returns a new InputService3ProtocolTest client.
func NewInputService3ProtocolTest(config *aws.Config) *InputService3ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "inputservice3protocoltest",
APIVersion: "2014-01-01",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &InputService3ProtocolTest{service}
}
// newRequest creates a new request for a InputService3ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opInputService3TestCaseOperation1 = "OperationName"
// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation.
func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) {
op := &aws.Operation{
Name: opInputService3TestCaseOperation1,
}
if input == nil {
input = &InputService3TestShapeInputShape{}
}
req = c.newRequest(op, input, output)
output = &InputService3TestShapeInputService3TestCaseOperation1Output{}
req.Data = output
return
}
func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) {
req, out := c.InputService3TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type InputService3TestShapeInputService3TestCaseOperation1Output struct {
metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"`
}
type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService3TestShapeInputShape struct {
StructArg *InputService3TestShapeStructType `locationName:"Struct" type:"structure"`
metadataInputService3TestShapeInputShape `json:"-" xml:"-"`
}
type metadataInputService3TestShapeInputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService3TestShapeStructType struct {
ScalarArg *string `locationName:"Scalar" type:"string"`
metadataInputService3TestShapeStructType `json:"-" xml:"-"`
}
type metadataInputService3TestShapeStructType struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService4ProtocolTest struct {
*aws.Service
}
// New returns a new InputService4ProtocolTest client.
func NewInputService4ProtocolTest(config *aws.Config) *InputService4ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "inputservice4protocoltest",
APIVersion: "2014-01-01",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &InputService4ProtocolTest{service}
}
// newRequest creates a new request for a InputService4ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opInputService4TestCaseOperation1 = "OperationName"
// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation.
func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) {
op := &aws.Operation{
Name: opInputService4TestCaseOperation1,
}
if input == nil {
input = &InputService4TestShapeInputShape{}
}
req = c.newRequest(op, input, output)
output = &InputService4TestShapeInputService4TestCaseOperation1Output{}
req.Data = output
return
}
func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) {
req, out := c.InputService4TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type InputService4TestShapeInputService4TestCaseOperation1Output struct {
metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"`
}
type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService4TestShapeInputShape struct {
ListArg []*string `type:"list"`
metadataInputService4TestShapeInputShape `json:"-" xml:"-"`
}
type metadataInputService4TestShapeInputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService5ProtocolTest struct {
*aws.Service
}
// New returns a new InputService5ProtocolTest client.
func NewInputService5ProtocolTest(config *aws.Config) *InputService5ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "inputservice5protocoltest",
APIVersion: "2014-01-01",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &InputService5ProtocolTest{service}
}
// newRequest creates a new request for a InputService5ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opInputService5TestCaseOperation1 = "OperationName"
// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation.
func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) {
op := &aws.Operation{
Name: opInputService5TestCaseOperation1,
}
if input == nil {
input = &InputService5TestShapeInputShape{}
}
req = c.newRequest(op, input, output)
output = &InputService5TestShapeInputService5TestCaseOperation1Output{}
req.Data = output
return
}
func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) {
req, out := c.InputService5TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type InputService5TestShapeInputService5TestCaseOperation1Output struct {
metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"`
}
type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService5TestShapeInputShape struct {
ListArg []*string `locationName:"ListMemberName" locationNameList:"item" type:"list"`
metadataInputService5TestShapeInputShape `json:"-" xml:"-"`
}
type metadataInputService5TestShapeInputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService6ProtocolTest struct {
*aws.Service
}
// New returns a new InputService6ProtocolTest client.
func NewInputService6ProtocolTest(config *aws.Config) *InputService6ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "inputservice6protocoltest",
APIVersion: "2014-01-01",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &InputService6ProtocolTest{service}
}
// newRequest creates a new request for a InputService6ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opInputService6TestCaseOperation1 = "OperationName"
// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation.
func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputShape) (req *aws.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) {
op := &aws.Operation{
Name: opInputService6TestCaseOperation1,
}
if input == nil {
input = &InputService6TestShapeInputShape{}
}
req = c.newRequest(op, input, output)
output = &InputService6TestShapeInputService6TestCaseOperation1Output{}
req.Data = output
return
}
func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputShape) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) {
req, out := c.InputService6TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type InputService6TestShapeInputService6TestCaseOperation1Output struct {
metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"`
}
type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService6TestShapeInputShape struct {
ListArg []*string `locationName:"ListMemberName" queryName:"ListQueryName" locationNameList:"item" type:"list"`
metadataInputService6TestShapeInputShape `json:"-" xml:"-"`
}
type metadataInputService6TestShapeInputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService7ProtocolTest struct {
*aws.Service
}
// New returns a new InputService7ProtocolTest client.
func NewInputService7ProtocolTest(config *aws.Config) *InputService7ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "inputservice7protocoltest",
APIVersion: "2014-01-01",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &InputService7ProtocolTest{service}
}
// newRequest creates a new request for a InputService7ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opInputService7TestCaseOperation1 = "OperationName"
// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation.
func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *aws.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) {
op := &aws.Operation{
Name: opInputService7TestCaseOperation1,
}
if input == nil {
input = &InputService7TestShapeInputShape{}
}
req = c.newRequest(op, input, output)
output = &InputService7TestShapeInputService7TestCaseOperation1Output{}
req.Data = output
return
}
func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) {
req, out := c.InputService7TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type InputService7TestShapeInputService7TestCaseOperation1Output struct {
metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"`
}
type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService7TestShapeInputShape struct {
BlobArg []byte `type:"blob"`
metadataInputService7TestShapeInputShape `json:"-" xml:"-"`
}
type metadataInputService7TestShapeInputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService8ProtocolTest struct {
*aws.Service
}
// New returns a new InputService8ProtocolTest client.
func NewInputService8ProtocolTest(config *aws.Config) *InputService8ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "inputservice8protocoltest",
APIVersion: "2014-01-01",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &InputService8ProtocolTest{service}
}
// newRequest creates a new request for a InputService8ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opInputService8TestCaseOperation1 = "OperationName"
// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation.
func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) {
op := &aws.Operation{
Name: opInputService8TestCaseOperation1,
}
if input == nil {
input = &InputService8TestShapeInputShape{}
}
req = c.newRequest(op, input, output)
output = &InputService8TestShapeInputService8TestCaseOperation1Output{}
req.Data = output
return
}
func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputShape) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) {
req, out := c.InputService8TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type InputService8TestShapeInputService8TestCaseOperation1Output struct {
metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"`
}
type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct {
SDKShapeTraits bool `type:"structure"`
}
type InputService8TestShapeInputShape struct {
TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"`
metadataInputService8TestShapeInputShape `json:"-" xml:"-"`
}
type metadataInputService8TestShapeInputShape struct {
SDKShapeTraits bool `type:"structure"`
}
//
// Tests begin here
//
func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) {
svc := NewInputService1ProtocolTest(nil)
svc.Endpoint = "https://test"
input := &InputService1TestShapeInputShape{
Bar: aws.String("val2"),
Foo: aws.String("val1"),
}
req, _ := svc.InputService1TestCaseOperation1Request(input)
r := req.HTTPRequest
// build request
ec2query.Build(req)
assert.NoError(t, req.Error)
// assert body
assert.NotNil(t, r.Body)
body, _ := ioutil.ReadAll(r.Body)
assert.Equal(t, util.Trim(`Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`), util.Trim(string(body)))
// assert URL
assert.Equal(t, "https://test/", r.URL.String())
// assert headers
}
func TestInputService2ProtocolTestStructureWithLocationNameAndQueryNameAppliedToMembersCase1(t *testing.T) {
svc := NewInputService2ProtocolTest(nil)
svc.Endpoint = "https://test"
input := &InputService2TestShapeInputShape{
Bar: aws.String("val2"),
Foo: aws.String("val1"),
Yuck: aws.String("val3"),
}
req, _ := svc.InputService2TestCaseOperation1Request(input)
r := req.HTTPRequest
// build request
ec2query.Build(req)
assert.NoError(t, req.Error)
// assert body
assert.NotNil(t, r.Body)
body, _ := ioutil.ReadAll(r.Body)
assert.Equal(t, util.Trim(`Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3`), util.Trim(string(body)))
// assert URL
assert.Equal(t, "https://test/", r.URL.String())
// assert headers
}
func TestInputService3ProtocolTestNestedStructureMembersCase1(t *testing.T) {
svc := NewInputService3ProtocolTest(nil)
svc.Endpoint = "https://test"
input := &InputService3TestShapeInputShape{
StructArg: &InputService3TestShapeStructType{
ScalarArg: aws.String("foo"),
},
}
req, _ := svc.InputService3TestCaseOperation1Request(input)
r := req.HTTPRequest
// build request
ec2query.Build(req)
assert.NoError(t, req.Error)
// assert body
assert.NotNil(t, r.Body)
body, _ := ioutil.ReadAll(r.Body)
assert.Equal(t, util.Trim(`Action=OperationName&Struct.Scalar=foo&Version=2014-01-01`), util.Trim(string(body)))
// assert URL
assert.Equal(t, "https://test/", r.URL.String())
// assert headers
}
func TestInputService4ProtocolTestListTypesCase1(t *testing.T) {
svc := NewInputService4ProtocolTest(nil)
svc.Endpoint = "https://test"
input := &InputService4TestShapeInputShape{
ListArg: []*string{
aws.String("foo"),
aws.String("bar"),
aws.String("baz"),
},
}
req, _ := svc.InputService4TestCaseOperation1Request(input)
r := req.HTTPRequest
// build request
ec2query.Build(req)
assert.NoError(t, req.Error)
// assert body
assert.NotNil(t, r.Body)
body, _ := ioutil.ReadAll(r.Body)
assert.Equal(t, util.Trim(`Action=OperationName&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz&Version=2014-01-01`), util.Trim(string(body)))
// assert URL
assert.Equal(t, "https://test/", r.URL.String())
// assert headers
}
func TestInputService5ProtocolTestListWithLocationNameAppliedToMemberCase1(t *testing.T) {
svc := NewInputService5ProtocolTest(nil)
svc.Endpoint = "https://test"
input := &InputService5TestShapeInputShape{
ListArg: []*string{
aws.String("a"),
aws.String("b"),
aws.String("c"),
},
}
req, _ := svc.InputService5TestCaseOperation1Request(input)
r := req.HTTPRequest
// build request
ec2query.Build(req)
assert.NoError(t, req.Error)
// assert body
assert.NotNil(t, r.Body)
body, _ := ioutil.ReadAll(r.Body)
assert.Equal(t, util.Trim(`Action=OperationName&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c&Version=2014-01-01`), util.Trim(string(body)))
// assert URL
assert.Equal(t, "https://test/", r.URL.String())
// assert headers
}
func TestInputService6ProtocolTestListWithLocationNameAndQueryNameCase1(t *testing.T) {
svc := NewInputService6ProtocolTest(nil)
svc.Endpoint = "https://test"
input := &InputService6TestShapeInputShape{
ListArg: []*string{
aws.String("a"),
aws.String("b"),
aws.String("c"),
},
}
req, _ := svc.InputService6TestCaseOperation1Request(input)
r := req.HTTPRequest
// build request
ec2query.Build(req)
assert.NoError(t, req.Error)
// assert body
assert.NotNil(t, r.Body)
body, _ := ioutil.ReadAll(r.Body)
assert.Equal(t, util.Trim(`Action=OperationName&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c&Version=2014-01-01`), util.Trim(string(body)))
// assert URL
assert.Equal(t, "https://test/", r.URL.String())
// assert headers
}
func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) {
svc := NewInputService7ProtocolTest(nil)
svc.Endpoint = "https://test"
input := &InputService7TestShapeInputShape{
BlobArg: []byte("foo"),
}
req, _ := svc.InputService7TestCaseOperation1Request(input)
r := req.HTTPRequest
// build request
ec2query.Build(req)
assert.NoError(t, req.Error)
// assert body
assert.NotNil(t, r.Body)
body, _ := ioutil.ReadAll(r.Body)
assert.Equal(t, util.Trim(`Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`), util.Trim(string(body)))
// assert URL
assert.Equal(t, "https://test/", r.URL.String())
// assert headers
}
func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) {
svc := NewInputService8ProtocolTest(nil)
svc.Endpoint = "https://test"
input := &InputService8TestShapeInputShape{
TimeArg: aws.Time(time.Unix(1422172800, 0)),
}
req, _ := svc.InputService8TestCaseOperation1Request(input)
r := req.HTTPRequest
// build request
ec2query.Build(req)
assert.NoError(t, req.Error)
// assert body
assert.NotNil(t, r.Body)
body, _ := ioutil.ReadAll(r.Body)
assert.Equal(t, util.Trim(`Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`), util.Trim(string(body)))
// assert URL
assert.Equal(t, "https://test/", r.URL.String())
// assert headers
}

View File

@@ -0,0 +1,54 @@
package ec2query
//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/ec2.json unmarshal_test.go
import (
"encoding/xml"
"io"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
)
// Unmarshal unmarshals a response body for the EC2 protocol.
func Unmarshal(r *aws.Request) {
defer r.HTTPResponse.Body.Close()
if r.DataFilled() {
decoder := xml.NewDecoder(r.HTTPResponse.Body)
err := xmlutil.UnmarshalXML(r.Data, decoder, "")
if err != nil {
r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err)
return
}
}
}
// UnmarshalMeta unmarshals response headers for the EC2 protocol.
func UnmarshalMeta(r *aws.Request) {
// TODO implement unmarshaling of request IDs
}
type xmlErrorResponse struct {
XMLName xml.Name `xml:"Response"`
Code string `xml:"Errors>Error>Code"`
Message string `xml:"Errors>Error>Message"`
RequestID string `xml:"RequestId"`
}
// UnmarshalError unmarshals a response error for the EC2 protocol.
func UnmarshalError(r *aws.Request) {
defer r.HTTPResponse.Body.Close()
resp := &xmlErrorResponse{}
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
if err != nil && err != io.EOF {
r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err)
} else {
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
r.HTTPResponse.StatusCode,
resp.RequestID,
)
}
}
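A standalone sketch of the error decoding above, run against a representative EC2-style error body; the error code, message, and request ID are made up for illustration.
package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

// xmlErrorResponse mirrors the struct used by UnmarshalError above.
type xmlErrorResponse struct {
    XMLName   xml.Name `xml:"Response"`
    Code      string   `xml:"Errors>Error>Code"`
    Message   string   `xml:"Errors>Error>Message"`
    RequestID string   `xml:"RequestId"`
}

func main() {
    body := `<Response>
  <Errors><Error>
    <Code>InvalidInstanceID.NotFound</Code>
    <Message>The instance ID 'i-deadbeef' does not exist</Message>
  </Error></Errors>
  <RequestId>ea966190-f9aa-478e-0000-000000000000</RequestId>
</Response>`

    resp := xmlErrorResponse{}
    if err := xml.NewDecoder(strings.NewReader(body)).Decode(&resp); err != nil {
        panic(err)
    }
    // These three fields feed awserr.NewRequestFailure in UnmarshalError.
    fmt.Println(resp.Code, resp.Message, resp.RequestID)
}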

View File

@@ -0,0 +1,816 @@
package ec2query_test
import (
"bytes"
"encoding/json"
"encoding/xml"
"io"
"io/ioutil"
"net/http"
"net/url"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/internal/protocol/ec2query"
"github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
"github.com/aws/aws-sdk-go/internal/signer/v4"
"github.com/aws/aws-sdk-go/internal/util"
"github.com/stretchr/testify/assert"
)
var _ bytes.Buffer // always import bytes
var _ http.Request
var _ json.Marshaler
var _ time.Time
var _ xmlutil.XMLNode
var _ xml.Attr
var _ = ioutil.Discard
var _ = util.Trim("")
var _ = url.Values{}
var _ = io.EOF
type OutputService1ProtocolTest struct {
*aws.Service
}
// New returns a new OutputService1ProtocolTest client.
func NewOutputService1ProtocolTest(config *aws.Config) *OutputService1ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "outputservice1protocoltest",
APIVersion: "",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &OutputService1ProtocolTest{service}
}
// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opOutputService1TestCaseOperation1 = "OperationName"
// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation.
func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) {
op := &aws.Operation{
Name: opOutputService1TestCaseOperation1,
}
if input == nil {
input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService1TestShapeOutputShape{}
req.Data = output
return
}
func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) {
req, out := c.OutputService1TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService1TestShapeOutputService1TestCaseOperation1Input struct {
metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"`
}
type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService1TestShapeOutputShape struct {
Char *string `type:"character"`
Double *float64 `type:"double"`
FalseBool *bool `type:"boolean"`
Float *float64 `type:"float"`
Long *int64 `type:"long"`
Num *int64 `locationName:"FooNum" type:"integer"`
Str *string `type:"string"`
TrueBool *bool `type:"boolean"`
metadataOutputService1TestShapeOutputShape `json:"-" xml:"-"`
}
type metadataOutputService1TestShapeOutputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService2ProtocolTest struct {
*aws.Service
}
// New returns a new OutputService2ProtocolTest client.
func NewOutputService2ProtocolTest(config *aws.Config) *OutputService2ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "outputservice2protocoltest",
APIVersion: "",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &OutputService2ProtocolTest{service}
}
// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opOutputService2TestCaseOperation1 = "OperationName"
// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation.
func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *aws.Request, output *OutputService2TestShapeOutputShape) {
op := &aws.Operation{
Name: opOutputService2TestCaseOperation1,
}
if input == nil {
input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService2TestShapeOutputShape{}
req.Data = output
return
}
func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputShape, error) {
req, out := c.OutputService2TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService2TestShapeOutputService2TestCaseOperation1Input struct {
metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"`
}
type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService2TestShapeOutputShape struct {
Blob []byte `type:"blob"`
metadataOutputService2TestShapeOutputShape `json:"-" xml:"-"`
}
type metadataOutputService2TestShapeOutputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService3ProtocolTest struct {
*aws.Service
}
// New returns a new OutputService3ProtocolTest client.
func NewOutputService3ProtocolTest(config *aws.Config) *OutputService3ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "outputservice3protocoltest",
APIVersion: "",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &OutputService3ProtocolTest{service}
}
// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opOutputService3TestCaseOperation1 = "OperationName"
// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation.
func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *aws.Request, output *OutputService3TestShapeOutputShape) {
op := &aws.Operation{
Name: opOutputService3TestCaseOperation1,
}
if input == nil {
input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService3TestShapeOutputShape{}
req.Data = output
return
}
func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputShape, error) {
req, out := c.OutputService3TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService3TestShapeOutputService3TestCaseOperation1Input struct {
metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"`
}
type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService3TestShapeOutputShape struct {
ListMember []*string `type:"list"`
metadataOutputService3TestShapeOutputShape `json:"-" xml:"-"`
}
type metadataOutputService3TestShapeOutputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService4ProtocolTest struct {
*aws.Service
}
// New returns a new OutputService4ProtocolTest client.
func NewOutputService4ProtocolTest(config *aws.Config) *OutputService4ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "outputservice4protocoltest",
APIVersion: "",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &OutputService4ProtocolTest{service}
}
// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opOutputService4TestCaseOperation1 = "OperationName"
// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation.
func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *aws.Request, output *OutputService4TestShapeOutputShape) {
op := &aws.Operation{
Name: opOutputService4TestCaseOperation1,
}
if input == nil {
input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService4TestShapeOutputShape{}
req.Data = output
return
}
func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) {
req, out := c.OutputService4TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService4TestShapeOutputService4TestCaseOperation1Input struct {
metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"`
}
type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService4TestShapeOutputShape struct {
ListMember []*string `locationNameList:"item" type:"list"`
metadataOutputService4TestShapeOutputShape `json:"-" xml:"-"`
}
type metadataOutputService4TestShapeOutputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService5ProtocolTest struct {
*aws.Service
}
// New returns a new OutputService5ProtocolTest client.
func NewOutputService5ProtocolTest(config *aws.Config) *OutputService5ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "outputservice5protocoltest",
APIVersion: "",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &OutputService5ProtocolTest{service}
}
// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opOutputService5TestCaseOperation1 = "OperationName"
// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation.
func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *aws.Request, output *OutputService5TestShapeOutputShape) {
op := &aws.Operation{
Name: opOutputService5TestCaseOperation1,
}
if input == nil {
input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService5TestShapeOutputShape{}
req.Data = output
return
}
func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputShape, error) {
req, out := c.OutputService5TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService5TestShapeOutputService5TestCaseOperation1Input struct {
metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"`
}
type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService5TestShapeOutputShape struct {
ListMember []*string `type:"list" flattened:"true"`
metadataOutputService5TestShapeOutputShape `json:"-" xml:"-"`
}
type metadataOutputService5TestShapeOutputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService6ProtocolTest struct {
*aws.Service
}
// New returns a new OutputService6ProtocolTest client.
func NewOutputService6ProtocolTest(config *aws.Config) *OutputService6ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "outputservice6protocoltest",
APIVersion: "",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &OutputService6ProtocolTest{service}
}
// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opOutputService6TestCaseOperation1 = "OperationName"
// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation.
func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *aws.Request, output *OutputService6TestShapeOutputShape) {
op := &aws.Operation{
Name: opOutputService6TestCaseOperation1,
}
if input == nil {
input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService6TestShapeOutputShape{}
req.Data = output
return
}
func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputShape, error) {
req, out := c.OutputService6TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService6TestShapeOutputService6TestCaseOperation1Input struct {
metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"`
}
type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService6TestShapeOutputShape struct {
Map map[string]*OutputService6TestShapeStructureType `type:"map"`
metadataOutputService6TestShapeOutputShape `json:"-" xml:"-"`
}
type metadataOutputService6TestShapeOutputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService6TestShapeStructureType struct {
Foo *string `locationName:"foo" type:"string"`
metadataOutputService6TestShapeStructureType `json:"-" xml:"-"`
}
type metadataOutputService6TestShapeStructureType struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService7ProtocolTest struct {
*aws.Service
}
// New returns a new OutputService7ProtocolTest client.
func NewOutputService7ProtocolTest(config *aws.Config) *OutputService7ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "outputservice7protocoltest",
APIVersion: "",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &OutputService7ProtocolTest{service}
}
// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opOutputService7TestCaseOperation1 = "OperationName"
// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation.
func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *aws.Request, output *OutputService7TestShapeOutputShape) {
op := &aws.Operation{
Name: opOutputService7TestCaseOperation1,
}
if input == nil {
input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService7TestShapeOutputShape{}
req.Data = output
return
}
func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputShape, error) {
req, out := c.OutputService7TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService7TestShapeOutputService7TestCaseOperation1Input struct {
metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"`
}
type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService7TestShapeOutputShape struct {
Map map[string]*string `type:"map" flattened:"true"`
metadataOutputService7TestShapeOutputShape `json:"-" xml:"-"`
}
type metadataOutputService7TestShapeOutputShape struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService8ProtocolTest struct {
*aws.Service
}
// New returns a new OutputService8ProtocolTest client.
func NewOutputService8ProtocolTest(config *aws.Config) *OutputService8ProtocolTest {
service := &aws.Service{
Config: aws.DefaultConfig.Merge(config),
ServiceName: "outputservice8protocoltest",
APIVersion: "",
}
service.Initialize()
// Handlers
service.Handlers.Sign.PushBack(v4.Sign)
service.Handlers.Build.PushBack(ec2query.Build)
service.Handlers.Unmarshal.PushBack(ec2query.Unmarshal)
service.Handlers.UnmarshalMeta.PushBack(ec2query.UnmarshalMeta)
service.Handlers.UnmarshalError.PushBack(ec2query.UnmarshalError)
return &OutputService8ProtocolTest{service}
}
// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
req := aws.NewRequest(c.Service, op, params, data)
return req
}
const opOutputService8TestCaseOperation1 = "OperationName"
// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation.
func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *aws.Request, output *OutputService8TestShapeOutputShape) {
op := &aws.Operation{
Name: opOutputService8TestCaseOperation1,
}
if input == nil {
input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService8TestShapeOutputShape{}
req.Data = output
return
}
func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputShape, error) {
req, out := c.OutputService8TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService8TestShapeOutputService8TestCaseOperation1Input struct {
metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"`
}
type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct {
SDKShapeTraits bool `type:"structure"`
}
type OutputService8TestShapeOutputShape struct {
Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"`
metadataOutputService8TestShapeOutputShape `json:"-" xml:"-"`
}
type metadataOutputService8TestShapeOutputShape struct {
SDKShapeTraits bool `type:"structure"`
}
//
// Tests begin here
//
func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
svc := NewOutputService1ProtocolTest(nil)
buf := bytes.NewReader([]byte("<OperationNameResponse><Str>myname</Str><FooNum>123</FooNum><FalseBool>false</FalseBool><TrueBool>true</TrueBool><Float>1.2</Float><Double>1.3</Double><Long>200</Long><Char>a</Char><RequestId>request-id</RequestId></OperationNameResponse>"))
req, out := svc.OutputService1TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
ec2query.UnmarshalMeta(req)
ec2query.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "a", *out.Char)
assert.Equal(t, 1.3, *out.Double)
assert.Equal(t, false, *out.FalseBool)
assert.Equal(t, 1.2, *out.Float)
assert.Equal(t, int64(200), *out.Long)
assert.Equal(t, int64(123), *out.Num)
assert.Equal(t, "myname", *out.Str)
assert.Equal(t, true, *out.TrueBool)
}
func TestOutputService2ProtocolTestBlobCase1(t *testing.T) {
svc := NewOutputService2ProtocolTest(nil)
buf := bytes.NewReader([]byte("<OperationNameResponse><Blob>dmFsdWU=</Blob><RequestId>requestid</RequestId></OperationNameResponse>"))
req, out := svc.OutputService2TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
ec2query.UnmarshalMeta(req)
ec2query.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "value", string(out.Blob))
}
func TestOutputService3ProtocolTestListsCase1(t *testing.T) {
svc := NewOutputService3ProtocolTest(nil)
buf := bytes.NewReader([]byte("<OperationNameResponse><ListMember><member>abc</member><member>123</member></ListMember><RequestId>requestid</RequestId></OperationNameResponse>"))
req, out := svc.OutputService3TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
ec2query.UnmarshalMeta(req)
ec2query.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "abc", *out.ListMember[0])
assert.Equal(t, "123", *out.ListMember[1])
}
func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
svc := NewOutputService4ProtocolTest(nil)
buf := bytes.NewReader([]byte("<OperationNameResponse><ListMember><item>abc</item><item>123</item></ListMember><RequestId>requestid</RequestId></OperationNameResponse>"))
req, out := svc.OutputService4TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
ec2query.UnmarshalMeta(req)
ec2query.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "abc", *out.ListMember[0])
assert.Equal(t, "123", *out.ListMember[1])
}
func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) {
svc := NewOutputService5ProtocolTest(nil)
buf := bytes.NewReader([]byte("<OperationNameResponse><ListMember>abc</ListMember><ListMember>123</ListMember><RequestId>requestid</RequestId></OperationNameResponse>"))
req, out := svc.OutputService5TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
ec2query.UnmarshalMeta(req)
ec2query.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "abc", *out.ListMember[0])
assert.Equal(t, "123", *out.ListMember[1])
}
func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) {
svc := NewOutputService6ProtocolTest(nil)
buf := bytes.NewReader([]byte("<OperationNameResponse><Map><entry><key>qux</key><value><foo>bar</foo></value></entry><entry><key>baz</key><value><foo>bam</foo></value></entry></Map><RequestId>requestid</RequestId></OperationNameResponse>"))
req, out := svc.OutputService6TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
ec2query.UnmarshalMeta(req)
ec2query.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "bam", *out.Map["baz"].Foo)
assert.Equal(t, "bar", *out.Map["qux"].Foo)
}
func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) {
svc := NewOutputService7ProtocolTest(nil)
buf := bytes.NewReader([]byte("<OperationNameResponse><Map><key>qux</key><value>bar</value></Map><Map><key>baz</key><value>bam</value></Map><RequestId>requestid</RequestId></OperationNameResponse>"))
req, out := svc.OutputService7TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
ec2query.UnmarshalMeta(req)
ec2query.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "bam", *out.Map["baz"])
assert.Equal(t, "bar", *out.Map["qux"])
}
func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) {
svc := NewOutputService8ProtocolTest(nil)
buf := bytes.NewReader([]byte("<OperationNameResponse><Map><foo>qux</foo><bar>bar</bar></Map><Map><foo>baz</foo><bar>bam</bar></Map><RequestId>requestid</RequestId></OperationNameResponse>"))
req, out := svc.OutputService8TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
ec2query.UnmarshalMeta(req)
ec2query.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "bam", *out.Map["baz"])
assert.Equal(t, "bar", *out.Map["qux"])
}


@@ -0,0 +1,33 @@
// Package query provides serialisation of AWS Query requests and responses.
package query
//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/query.json build_test.go
import (
"net/url"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/internal/protocol/query/queryutil"
)
// Build builds a request for an AWS Query service.
func Build(r *aws.Request) {
body := url.Values{
"Action": {r.Operation.Name},
"Version": {r.Service.APIVersion},
}
if err := queryutil.Parse(body, r.Params, false); err != nil {
r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
return
}
if r.ExpireTime == 0 {
r.HTTPRequest.Method = "POST"
r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
r.SetBufferBody([]byte(body.Encode()))
} else { // This is a pre-signed request
r.HTTPRequest.Method = "GET"
r.HTTPRequest.URL.RawQuery = body.Encode()
}
}
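For intuition about the wire format Build produces, here is a minimal standalone sketch using only the standard library; the operation name, API version and parameter are hypothetical, not taken from any real AWS API.

package main

import (
    "fmt"
    "net/url"
)

func main() {
    // Sketch (not part of the diff): Build seeds the body with Action and
    // Version, then queryutil.Parse adds the flattened request parameters.
    body := url.Values{
        "Action":  {"CreateWidget"},
        "Version": {"2015-01-01"},
    }
    body.Set("WidgetName", "example")

    // Non-presigned requests send this as a form-encoded POST body;
    // presigned requests put the same string into the URL's RawQuery.
    fmt.Println(body.Encode())
    // Action=CreateWidget&Version=2015-01-01&WidgetName=example
}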

File diff suppressed because it is too large


@@ -0,0 +1,223 @@
package queryutil
import (
"encoding/base64"
"fmt"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
// Parse parses an object i and fills a url.Values object. The isEC2 flag
// indicates if this is the EC2 Query sub-protocol.
func Parse(body url.Values, i interface{}, isEC2 bool) error {
q := queryParser{isEC2: isEC2}
return q.parseValue(body, reflect.ValueOf(i), "", "")
}
func elemOf(value reflect.Value) reflect.Value {
for value.Kind() == reflect.Ptr {
value = value.Elem()
}
return value
}
type queryParser struct {
isEC2 bool
}
func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
value = elemOf(value)
// no need to handle zero values
if !value.IsValid() {
return nil
}
t := tag.Get("type")
if t == "" {
switch value.Kind() {
case reflect.Struct:
t = "structure"
case reflect.Slice:
t = "list"
case reflect.Map:
t = "map"
}
}
switch t {
case "structure":
return q.parseStruct(v, value, prefix)
case "list":
return q.parseList(v, value, prefix, tag)
case "map":
return q.parseMap(v, value, prefix, tag)
default:
return q.parseScalar(v, value, prefix, tag)
}
}
func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
if !value.IsValid() {
return nil
}
t := value.Type()
for i := 0; i < value.NumField(); i++ {
if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c {
continue // ignore unexported fields
}
value := elemOf(value.Field(i))
field := t.Field(i)
var name string
if q.isEC2 {
name = field.Tag.Get("queryName")
}
if name == "" {
if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
name = field.Tag.Get("locationNameList")
} else if locName := field.Tag.Get("locationName"); locName != "" {
name = locName
}
if name != "" && q.isEC2 {
name = strings.ToUpper(name[0:1]) + name[1:]
}
}
if name == "" {
name = field.Name
}
if prefix != "" {
name = prefix + "." + name
}
if err := q.parseValue(v, value, name, field.Tag); err != nil {
return err
}
}
return nil
}
func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
// If it's empty, generate an empty value
if !value.IsNil() && value.Len() == 0 {
v.Set(prefix, "")
return nil
}
// check for unflattened list member
if !q.isEC2 && tag.Get("flattened") == "" {
prefix += ".member"
}
for i := 0; i < value.Len(); i++ {
slicePrefix := prefix
if slicePrefix == "" {
slicePrefix = strconv.Itoa(i + 1)
} else {
slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
}
if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
return err
}
}
return nil
}
func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
// If it's empty, generate an empty value
if !value.IsNil() && value.Len() == 0 {
v.Set(prefix, "")
return nil
}
// check for unflattened map entry
if !q.isEC2 && tag.Get("flattened") == "" {
prefix += ".entry"
}
// sort keys for improved serialization consistency.
// this is not strictly necessary for protocol support.
mapKeyValues := value.MapKeys()
mapKeys := map[string]reflect.Value{}
mapKeyNames := make([]string, len(mapKeyValues))
for i, mapKey := range mapKeyValues {
name := mapKey.String()
mapKeys[name] = mapKey
mapKeyNames[i] = name
}
sort.Strings(mapKeyNames)
for i, mapKeyName := range mapKeyNames {
mapKey := mapKeys[mapKeyName]
mapValue := value.MapIndex(mapKey)
kname := tag.Get("locationNameKey")
if kname == "" {
kname = "key"
}
vname := tag.Get("locationNameValue")
if vname == "" {
vname = "value"
}
// serialize key
var keyName string
if prefix == "" {
keyName = strconv.Itoa(i+1) + "." + kname
} else {
keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
}
if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
return err
}
// serialize value
var valueName string
if prefix == "" {
valueName = strconv.Itoa(i+1) + "." + vname
} else {
valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
}
if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
return err
}
}
return nil
}
func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
switch value := r.Interface().(type) {
case string:
v.Set(name, value)
case []byte:
if !r.IsNil() {
v.Set(name, base64.StdEncoding.EncodeToString(value))
}
case bool:
v.Set(name, strconv.FormatBool(value))
case int64:
v.Set(name, strconv.FormatInt(value, 10))
case int:
v.Set(name, strconv.Itoa(value))
case float64:
v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
case float32:
v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
case time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
v.Set(name, value.UTC().Format(ISO8601UTC))
default:
return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
}
return nil
}
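To make the naming rules above concrete, the following standalone sketch (standard library only, hypothetical field names) shows the key shapes the parser emits: non-flattened lists are wrapped with ".member.N", non-flattened maps with ".entry.N.key"/".entry.N.value", while flattened and EC2-style members drop the wrapper.

package main

import (
    "fmt"
    "net/url"
    "strconv"
)

func main() {
    v := url.Values{}

    // Non-flattened list: Tags.member.1, Tags.member.2, ...
    for i, s := range []string{"a", "b"} {
        v.Set("Tags.member."+strconv.Itoa(i+1), s)
    }

    // Non-flattened map: Attrs.entry.1.key / Attrs.entry.1.value, ...
    v.Set("Attrs.entry.1.key", "color")
    v.Set("Attrs.entry.1.value", "red")

    // Flattened (or EC2) equivalents drop the wrapper: Tags.1, Attrs.1.key, ...
    fmt.Println(v.Encode())
}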


@@ -0,0 +1,29 @@
package query
//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/query.json unmarshal_test.go
import (
"encoding/xml"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
)
// Unmarshal unmarshals a response for an AWS Query service.
func Unmarshal(r *aws.Request) {
defer r.HTTPResponse.Body.Close()
if r.DataFilled() {
decoder := xml.NewDecoder(r.HTTPResponse.Body)
err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
if err != nil {
r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
return
}
}
}
// UnmarshalMeta unmarshals header response values for an AWS Query service.
func UnmarshalMeta(r *aws.Request) {
// TODO implement unmarshaling of request IDs
}
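The wrapper passed to UnmarshalXML above is the "<OperationName>Result" element; this standalone sketch (hypothetical operation and field names, standard library only) shows the shape of such a response and where the useful data sits.

package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

// createWidgetOutput is a made-up shape; the Name field lives inside the
// CreateWidgetResult wrapper, mirroring r.Operation.Name+"Result" above.
type createWidgetOutput struct {
    XMLName xml.Name `xml:"CreateWidgetResponse"`
    Name    string   `xml:"CreateWidgetResult>Name"`
}

func main() {
    body := `<CreateWidgetResponse>
  <CreateWidgetResult><Name>example</Name></CreateWidgetResult>
  <ResponseMetadata><RequestId>abc-123</RequestId></ResponseMetadata>
</CreateWidgetResponse>`
    var out createWidgetOutput
    if err := xml.NewDecoder(strings.NewReader(body)).Decode(&out); err != nil {
        fmt.Println("decode error:", err)
        return
    }
    fmt.Println(out.Name) // example
}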


@@ -0,0 +1,33 @@
package query
import (
"encoding/xml"
"io"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
)
type xmlErrorResponse struct {
XMLName xml.Name `xml:"ErrorResponse"`
Code string `xml:"Error>Code"`
Message string `xml:"Error>Message"`
RequestID string `xml:"RequestId"`
}
// UnmarshalError unmarshals an error response for an AWS Query service.
func UnmarshalError(r *aws.Request) {
defer r.HTTPResponse.Body.Close()
resp := &xmlErrorResponse{}
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
if err != nil && err != io.EOF {
r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
} else {
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
r.HTTPResponse.StatusCode,
resp.RequestID,
)
}
}
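Because the error shape above is plain XML, it can be exercised directly with the standard library; this standalone sketch reuses the same struct tags against a made-up error body.

package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

type xmlErrorResponse struct {
    XMLName   xml.Name `xml:"ErrorResponse"`
    Code      string   `xml:"Error>Code"`
    Message   string   `xml:"Error>Message"`
    RequestID string   `xml:"RequestId"`
}

func main() {
    body := `<ErrorResponse>
  <Error><Code>Throttling</Code><Message>Rate exceeded</Message></Error>
  <RequestId>abc-123</RequestId>
</ErrorResponse>`
    var resp xmlErrorResponse
    if err := xml.NewDecoder(strings.NewReader(body)).Decode(&resp); err != nil {
        fmt.Println("decode error:", err)
        return
    }
    fmt.Println(resp.Code, resp.Message, resp.RequestID) // Throttling Rate exceeded abc-123
}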

File diff suppressed because it is too large


@@ -0,0 +1,217 @@
// Package rest provides RESTful serialisation of AWS requests and responses.
package rest
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"net/url"
"path"
"reflect"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// RFC822 is the RFC822 timestamp format used by AWS protocols
const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
// Whether the byte value can be sent without escaping in AWS URLs
var noEscape [256]bool
func init() {
for i := 0; i < len(noEscape); i++ {
// AWS expects every character except these to be escaped
noEscape[i] = (i >= 'A' && i <= 'Z') ||
(i >= 'a' && i <= 'z') ||
(i >= '0' && i <= '9') ||
i == '-' ||
i == '.' ||
i == '_' ||
i == '~'
}
}
// Build builds the REST component of a service request.
func Build(r *aws.Request) {
if r.ParamsFilled() {
v := reflect.ValueOf(r.Params).Elem()
buildLocationElements(r, v)
buildBody(r, v)
}
}
func buildLocationElements(r *aws.Request, v reflect.Value) {
query := r.HTTPRequest.URL.Query()
for i := 0; i < v.NumField(); i++ {
m := v.Field(i)
if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
continue
}
if m.IsValid() {
field := v.Type().Field(i)
name := field.Tag.Get("locationName")
if name == "" {
name = field.Name
}
if m.Kind() == reflect.Ptr {
m = m.Elem()
}
if !m.IsValid() {
continue
}
switch field.Tag.Get("location") {
case "headers": // header maps
buildHeaderMap(r, m, field.Tag.Get("locationName"))
case "header":
buildHeader(r, m, name)
case "uri":
buildURI(r, m, name)
case "querystring":
buildQueryString(r, m, name, query)
}
}
if r.Error != nil {
return
}
}
r.HTTPRequest.URL.RawQuery = query.Encode()
updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path)
}
func buildBody(r *aws.Request, v reflect.Value) {
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
pfield, _ := v.Type().FieldByName(payloadName)
if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
payload := reflect.Indirect(v.FieldByName(payloadName))
if payload.IsValid() && payload.Interface() != nil {
switch reader := payload.Interface().(type) {
case io.ReadSeeker:
r.SetReaderBody(reader)
case []byte:
r.SetBufferBody(reader)
case string:
r.SetStringBody(reader)
default:
r.Error = awserr.New("SerializationError",
"failed to encode REST request",
fmt.Errorf("unknown payload type %s", payload.Type()))
}
}
}
}
}
}
func buildHeader(r *aws.Request, v reflect.Value, name string) {
str, err := convertType(v)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
} else if str != nil {
r.HTTPRequest.Header.Add(name, *str)
}
}
func buildHeaderMap(r *aws.Request, v reflect.Value, prefix string) {
for _, key := range v.MapKeys() {
str, err := convertType(v.MapIndex(key))
if err != nil {
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
} else if str != nil {
r.HTTPRequest.Header.Add(prefix+key.String(), *str)
}
}
}
func buildURI(r *aws.Request, v reflect.Value, name string) {
value, err := convertType(v)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
} else if value != nil {
uri := r.HTTPRequest.URL.Path
uri = strings.Replace(uri, "{"+name+"}", EscapePath(*value, true), -1)
uri = strings.Replace(uri, "{"+name+"+}", EscapePath(*value, false), -1)
r.HTTPRequest.URL.Path = uri
}
}
func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Values) {
str, err := convertType(v)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
} else if str != nil {
query.Set(name, *str)
}
}
func updatePath(url *url.URL, urlPath string) {
scheme, query := url.Scheme, url.RawQuery
hasSlash := strings.HasSuffix(urlPath, "/")
// clean up path
urlPath = path.Clean(urlPath)
if hasSlash && !strings.HasSuffix(urlPath, "/") {
urlPath += "/"
}
// get formatted URL minus scheme so we can build this into Opaque
url.Scheme, url.Path, url.RawQuery = "", "", ""
s := url.String()
url.Scheme = scheme
url.RawQuery = query
// build opaque URI
url.Opaque = s + urlPath
}
// EscapePath escapes part of a URL path in Amazon style
func EscapePath(path string, encodeSep bool) string {
var buf bytes.Buffer
for i := 0; i < len(path); i++ {
c := path[i]
if noEscape[c] || (c == '/' && !encodeSep) {
buf.WriteByte(c)
} else {
buf.WriteByte('%')
buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
}
}
return buf.String()
}
func convertType(v reflect.Value) (*string, error) {
v = reflect.Indirect(v)
if !v.IsValid() {
return nil, nil
}
var str string
switch value := v.Interface().(type) {
case string:
str = value
case []byte:
str = base64.StdEncoding.EncodeToString(value)
case bool:
str = strconv.FormatBool(value)
case int64:
str = strconv.FormatInt(value, 10)
case float64:
str = strconv.FormatFloat(value, 'f', -1, 64)
case time.Time:
str = value.UTC().Format(RFC822)
default:
err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
return nil, err
}
return &str, nil
}
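The "{name}" / "{name+}" substitution performed by buildURI can be pictured with this standalone sketch; the path template and values are hypothetical, and the real code additionally runs both replacements through EscapePath (escaping '/' only for the "{name}" form).

package main

import (
    "fmt"
    "strings"
)

func main() {
    path := "/{Bucket}/{Key+}"
    path = strings.Replace(path, "{Bucket}", "my-bucket", -1)
    // The "+" form keeps path separators, which is why buildURI calls
    // EscapePath with encodeSep=false for it.
    path = strings.Replace(path, "{Key+}", "photos/2015/cat.png", -1)
    fmt.Println(path) // /my-bucket/photos/2015/cat.png
}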


@@ -0,0 +1,45 @@
package rest
import "reflect"
// PayloadMember returns the payload field member of i if there is one, or nil.
func PayloadMember(i interface{}) interface{} {
if i == nil {
return nil
}
v := reflect.ValueOf(i).Elem()
if !v.IsValid() {
return nil
}
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
field, _ := v.Type().FieldByName(payloadName)
if field.Tag.Get("type") != "structure" {
return nil
}
payload := v.FieldByName(payloadName)
if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
return payload.Interface()
}
}
}
return nil
}
// PayloadType returns the type of a payload field member of i if there is one, or "".
func PayloadType(i interface{}) string {
v := reflect.Indirect(reflect.ValueOf(i))
if !v.IsValid() {
return ""
}
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
if member, ok := v.Type().FieldByName(payloadName); ok {
return member.Tag.Get("type")
}
}
}
return ""
}
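As a small standalone sketch of the SDKShapeTraits/payload convention these helpers read: getObjectOutput is a made-up shape and payloadType is a trimmed restatement of PayloadType above, shown only to illustrate the tag lookup.

package main

import (
    "fmt"
    "reflect"
)

// getObjectOutput is hypothetical; SDKShapeTraits names the payload field.
type getObjectOutput struct {
    Body           []byte `type:"blob"`
    SDKShapeTraits bool   `payload:"Body" type:"structure"`
}

func payloadType(i interface{}) string {
    v := reflect.Indirect(reflect.ValueOf(i))
    if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
        if name := field.Tag.Get("payload"); name != "" {
            if member, ok := v.Type().FieldByName(name); ok {
                return member.Tag.Get("type")
            }
        }
    }
    return ""
}

func main() {
    fmt.Println(payloadType(&getObjectOutput{})) // blob
}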


@@ -0,0 +1,174 @@
package rest
import (
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
"reflect"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// Unmarshal unmarshals the REST component of a response in a REST service.
func Unmarshal(r *aws.Request) {
if r.DataFilled() {
v := reflect.Indirect(reflect.ValueOf(r.Data))
unmarshalBody(r, v)
unmarshalLocationElements(r, v)
}
}
func unmarshalBody(r *aws.Request, v reflect.Value) {
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
pfield, _ := v.Type().FieldByName(payloadName)
if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
payload := v.FieldByName(payloadName)
if payload.IsValid() {
switch payload.Interface().(type) {
case []byte:
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
} else {
payload.Set(reflect.ValueOf(b))
}
case *string:
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
} else {
str := string(b)
payload.Set(reflect.ValueOf(&str))
}
default:
switch payload.Type().String() {
case "io.ReadSeeker":
payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body)))
case "aws.ReadSeekCloser", "io.ReadCloser":
payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
default:
r.Error = awserr.New("SerializationError",
"failed to decode REST response",
fmt.Errorf("unknown payload type %s", payload.Type()))
}
}
}
}
}
}
}
func unmarshalLocationElements(r *aws.Request, v reflect.Value) {
for i := 0; i < v.NumField(); i++ {
m, field := v.Field(i), v.Type().Field(i)
if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
continue
}
if m.IsValid() {
name := field.Tag.Get("locationName")
if name == "" {
name = field.Name
}
switch field.Tag.Get("location") {
case "statusCode":
unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
case "header":
err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name))
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
break
}
case "headers":
prefix := field.Tag.Get("locationName")
err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
break
}
}
}
if r.Error != nil {
return
}
}
}
func unmarshalStatusCode(v reflect.Value, statusCode int) {
if !v.IsValid() {
return
}
switch v.Interface().(type) {
case *int64:
s := int64(statusCode)
v.Set(reflect.ValueOf(&s))
}
}
func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
switch r.Interface().(type) {
case map[string]*string: // we only support string map value types
out := map[string]*string{}
for k, v := range headers {
k = http.CanonicalHeaderKey(k)
if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
out[k[len(prefix):]] = &v[0]
}
}
r.Set(reflect.ValueOf(out))
}
return nil
}
func unmarshalHeader(v reflect.Value, header string) error {
if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
return nil
}
switch v.Interface().(type) {
case *string:
v.Set(reflect.ValueOf(&header))
case []byte:
b, err := base64.StdEncoding.DecodeString(header)
if err != nil {
return err
}
v.Set(reflect.ValueOf(&b))
case *bool:
b, err := strconv.ParseBool(header)
if err != nil {
return err
}
v.Set(reflect.ValueOf(&b))
case *int64:
i, err := strconv.ParseInt(header, 10, 64)
if err != nil {
return err
}
v.Set(reflect.ValueOf(&i))
case *float64:
f, err := strconv.ParseFloat(header, 64)
if err != nil {
return err
}
v.Set(reflect.ValueOf(&f))
case *time.Time:
t, err := time.Parse(RFC822, header)
if err != nil {
return err
}
v.Set(reflect.ValueOf(&t))
default:
err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
return err
}
return nil
}
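The per-type conversions unmarshalHeader applies are plain standard-library parses; a standalone sketch with made-up header values:

package main

import (
    "fmt"
    "strconv"
    "time"
)

const rfc822 = "Mon, 2 Jan 2006 15:04:05 GMT" // same layout as rest.RFC822

func main() {
    n, _ := strconv.ParseInt("42", 10, 64)                     // *int64 header
    b, _ := strconv.ParseBool("true")                          // *bool header
    t, _ := time.Parse(rfc822, "Fri, 4 Dec 2015 10:39:50 GMT") // *time.Time header
    fmt.Println(n, b, t.UTC())
}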


@@ -0,0 +1,287 @@
// Package xmlutil provides XML serialisation of AWS requests and responses.
package xmlutil
import (
"encoding/base64"
"encoding/xml"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
// BuildXML will serialize params into an xml.Encoder.
// Error will be returned if the serialization of any of the params or nested values fails.
func BuildXML(params interface{}, e *xml.Encoder) error {
b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
root := NewXMLElement(xml.Name{})
if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
return err
}
for _, c := range root.Children {
for _, v := range c {
return StructToXML(e, v, false)
}
}
return nil
}
// Returns the reflection element of a value, if it is a pointer.
func elemOf(value reflect.Value) reflect.Value {
for value.Kind() == reflect.Ptr {
value = value.Elem()
}
return value
}
// An xmlBuilder serializes values from Go code to XML
type xmlBuilder struct {
encoder *xml.Encoder
namespaces map[string]string
}
// buildValue is a generic XMLNode builder for any type. It builds the value
// according to its specific type: struct, list, map, or scalar.
//
// It also accepts a "type" tag value that overrides how the value is converted
// to an XMLNode. If no type is provided, reflection is used to determine the
// value's type.
func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
value = elemOf(value)
if !value.IsValid() { // no need to handle zero values
return nil
} else if tag.Get("location") != "" { // don't handle non-body location values
return nil
}
t := tag.Get("type")
if t == "" {
switch value.Kind() {
case reflect.Struct:
t = "structure"
case reflect.Slice:
t = "list"
case reflect.Map:
t = "map"
}
}
switch t {
case "structure":
if field, ok := value.Type().FieldByName("SDKShapeTraits"); ok {
tag = tag + reflect.StructTag(" ") + field.Tag
}
return b.buildStruct(value, current, tag)
case "list":
return b.buildList(value, current, tag)
case "map":
return b.buildMap(value, current, tag)
default:
return b.buildScalar(value, current, tag)
}
}
// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
// types are also converted to XMLNodes.
func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
if !value.IsValid() {
return nil
}
fieldAdded := false
// unwrap payloads
if payload := tag.Get("payload"); payload != "" {
field, _ := value.Type().FieldByName(payload)
tag = field.Tag
value = elemOf(value.FieldByName(payload))
if !value.IsValid() {
return nil
}
}
child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
// there is an xmlNamespace associated with this struct
if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
ns := xml.Attr{
Name: xml.Name{Local: "xmlns"},
Value: uri,
}
if prefix != "" {
b.namespaces[prefix] = uri // register the namespace
ns.Name.Local = "xmlns:" + prefix
}
child.Attr = append(child.Attr, ns)
}
t := value.Type()
for i := 0; i < value.NumField(); i++ {
if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c {
continue // ignore unexported fields
}
member := elemOf(value.Field(i))
field := t.Field(i)
mTag := field.Tag
if mTag.Get("location") != "" { // skip non-body members
continue
}
memberName := mTag.Get("locationName")
if memberName == "" {
memberName = field.Name
mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
}
if err := b.buildValue(member, child, mTag); err != nil {
return err
}
fieldAdded = true
}
if fieldAdded { // only append this child if we have one or more valid members
current.AddChild(child)
}
return nil
}
// buildList adds the value's list items to the current XMLNode as child nodes. All
// nested values in the list are also converted to XMLNodes.
func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
if value.IsNil() { // don't build omitted lists
return nil
}
// check for unflattened list member
flattened := tag.Get("flattened") != ""
xname := xml.Name{Local: tag.Get("locationName")}
if flattened {
for i := 0; i < value.Len(); i++ {
child := NewXMLElement(xname)
current.AddChild(child)
if err := b.buildValue(value.Index(i), child, ""); err != nil {
return err
}
}
} else {
list := NewXMLElement(xname)
current.AddChild(list)
for i := 0; i < value.Len(); i++ {
iname := tag.Get("locationNameList")
if iname == "" {
iname = "member"
}
child := NewXMLElement(xml.Name{Local: iname})
list.AddChild(child)
if err := b.buildValue(value.Index(i), child, ""); err != nil {
return err
}
}
}
return nil
}
// buildMap adds the value's key/value pairs to the current XMLNode as child nodes. All
// nested values in the map are also converted to XMLNodes.
//
// Error will be returned if it is unable to build the map's values into XMLNodes
func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
if value.IsNil() { // don't build omitted maps
return nil
}
maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
current.AddChild(maproot)
current = maproot
kname, vname := "key", "value"
if n := tag.Get("locationNameKey"); n != "" {
kname = n
}
if n := tag.Get("locationNameValue"); n != "" {
vname = n
}
// sorting is not required for compliance, but it makes testing easier
keys := make([]string, value.Len())
for i, k := range value.MapKeys() {
keys[i] = k.String()
}
sort.Strings(keys)
for _, k := range keys {
v := value.MapIndex(reflect.ValueOf(k))
mapcur := current
if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
child := NewXMLElement(xml.Name{Local: "entry"})
mapcur.AddChild(child)
mapcur = child
}
kchild := NewXMLElement(xml.Name{Local: kname})
kchild.Text = k
vchild := NewXMLElement(xml.Name{Local: vname})
mapcur.AddChild(kchild)
mapcur.AddChild(vchild)
if err := b.buildValue(v, vchild, ""); err != nil {
return err
}
}
return nil
}
// buildScalar will convert the value into a string and append it as an attribute or child
// of the current XMLNode.
//
// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
//
// Error will be returned if the value type is unsupported.
func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
var str string
switch converted := value.Interface().(type) {
case string:
str = converted
case []byte:
if !value.IsNil() {
str = base64.StdEncoding.EncodeToString(converted)
}
case bool:
str = strconv.FormatBool(converted)
case int64:
str = strconv.FormatInt(converted, 10)
case int:
str = strconv.Itoa(converted)
case float64:
str = strconv.FormatFloat(converted, 'f', -1, 64)
case float32:
str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
case time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
str = converted.UTC().Format(ISO8601UTC)
default:
return fmt.Errorf("unsupported value for param %s: %v (%s)",
tag.Get("locationName"), value.Interface(), value.Type().Name())
}
xname := xml.Name{Local: tag.Get("locationName")}
if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
attr := xml.Attr{Name: xname, Value: str}
current.Attr = append(current.Attr, attr)
} else { // regular text node
current.AddChild(&XMLNode{Name: xname, Text: str})
}
return nil
}
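For intuition about the list encodings buildList emits, this standalone sketch uses encoding/xml with hypothetical shapes: the wrapped form nests each item in a "member" element, while the flattened form repeats the list element itself.

package main

import (
    "encoding/xml"
    "fmt"
)

// Both shapes are made up for illustration.
type wrappedList struct {
    XMLName    xml.Name `xml:"OperationRequest"`
    ListMember []string `xml:"ListMember>member"` // default, member-wrapped form
}

type flattenedList struct {
    XMLName    xml.Name `xml:"OperationRequest"`
    ListMember []string `xml:"ListMember"` // flattened:"true" form
}

func main() {
    w, _ := xml.Marshal(wrappedList{ListMember: []string{"abc", "123"}})
    f, _ := xml.Marshal(flattenedList{ListMember: []string{"abc", "123"}})
    fmt.Println(string(w))
    fmt.Println(string(f))
}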


@@ -0,0 +1,260 @@
package xmlutil
import (
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"reflect"
"strconv"
"strings"
"time"
)
// UnmarshalXML deserializes an xml.Decoder into the container v. The container
// must match the shape of the XML expected to be decoded; if the shapes do not
// match, unmarshaling will fail.
func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
n, _ := XMLToStruct(d, nil)
if n.Children != nil {
for _, root := range n.Children {
for _, c := range root {
if wrappedChild, ok := c.Children[wrapper]; ok {
c = wrappedChild[0] // pull out wrapped element
}
err := parse(reflect.ValueOf(v), c, "")
if err != nil {
if err == io.EOF {
return nil
}
return err
}
}
}
return nil
}
return nil
}
// parse deserializes any value from the XMLNode. The "type" tag is used to infer the type,
// otherwise reflection is used to determine the type from r.
func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
rtype := r.Type()
if rtype.Kind() == reflect.Ptr {
rtype = rtype.Elem() // check kind of actual element type
}
t := tag.Get("type")
if t == "" {
switch rtype.Kind() {
case reflect.Struct:
t = "structure"
case reflect.Slice:
t = "list"
case reflect.Map:
t = "map"
}
}
switch t {
case "structure":
if field, ok := rtype.FieldByName("SDKShapeTraits"); ok {
tag = field.Tag
}
return parseStruct(r, node, tag)
case "list":
return parseList(r, node, tag)
case "map":
return parseMap(r, node, tag)
default:
return parseScalar(r, node, tag)
}
}
// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
// types in the structure will also be deserialized.
func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
t := r.Type()
if r.Kind() == reflect.Ptr {
if r.IsNil() { // create the structure if it's nil
s := reflect.New(r.Type().Elem())
r.Set(s)
r = s
}
r = r.Elem()
t = t.Elem()
}
// unwrap any payloads
if payload := tag.Get("payload"); payload != "" {
field, _ := t.FieldByName(payload)
return parseStruct(r.FieldByName(payload), node, field.Tag)
}
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if c := field.Name[0:1]; strings.ToLower(c) == c {
continue // ignore unexported fields
}
// figure out what this field is called
name := field.Name
if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
name = field.Tag.Get("locationNameList")
} else if locName := field.Tag.Get("locationName"); locName != "" {
name = locName
}
// try to find the field by name in elements
elems := node.Children[name]
if elems == nil { // try to find the field in attributes
for _, a := range node.Attr {
if name == a.Name.Local {
// turn this into a text node for de-serializing
elems = []*XMLNode{{Text: a.Value}}
}
}
}
member := r.FieldByName(field.Name)
for _, elem := range elems {
err := parse(member, elem, field.Tag)
if err != nil {
return err
}
}
}
return nil
}
// parseList deserializes a list of values from an XML node. Each list entry
// will also be deserialized.
func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
t := r.Type()
if tag.Get("flattened") == "" { // look at all item entries
mname := "member"
if name := tag.Get("locationNameList"); name != "" {
mname = name
}
if Children, ok := node.Children[mname]; ok {
if r.IsNil() {
r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
}
for i, c := range Children {
err := parse(r.Index(i), c, "")
if err != nil {
return err
}
}
}
} else { // flattened list means this is a single element
if r.IsNil() {
r.Set(reflect.MakeSlice(t, 0, 0))
}
childR := reflect.Zero(t.Elem())
r.Set(reflect.Append(r, childR))
err := parse(r.Index(r.Len()-1), node, "")
if err != nil {
return err
}
}
return nil
}
// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
// will also be deserialized as map entries.
func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
if r.IsNil() {
r.Set(reflect.MakeMap(r.Type()))
}
if tag.Get("flattened") == "" { // look at all child entries
for _, entry := range node.Children["entry"] {
parseMapEntry(r, entry, tag)
}
} else { // this element is itself an entry
parseMapEntry(r, node, tag)
}
return nil
}
// parseMapEntry deserializes a map entry from an XML node.
func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
kname, vname := "key", "value"
if n := tag.Get("locationNameKey"); n != "" {
kname = n
}
if n := tag.Get("locationNameValue"); n != "" {
vname = n
}
keys, ok := node.Children[kname]
values := node.Children[vname]
if ok {
for i, key := range keys {
keyR := reflect.ValueOf(key.Text)
value := values[i]
valueR := reflect.New(r.Type().Elem()).Elem()
parse(valueR, value, "")
r.SetMapIndex(keyR, valueR)
}
}
return nil
}
// parseScalar deserializes an XMLNode value into a concrete type based on the
// interface type of r.
//
// An error is returned if the deserialization fails due to an invalid type conversion
// or an unsupported interface type.
func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
switch r.Interface().(type) {
case *string:
r.Set(reflect.ValueOf(&node.Text))
return nil
case []byte:
b, err := base64.StdEncoding.DecodeString(node.Text)
if err != nil {
return err
}
r.Set(reflect.ValueOf(b))
case *bool:
v, err := strconv.ParseBool(node.Text)
if err != nil {
return err
}
r.Set(reflect.ValueOf(&v))
case *int64:
v, err := strconv.ParseInt(node.Text, 10, 64)
if err != nil {
return err
}
r.Set(reflect.ValueOf(&v))
case *float64:
v, err := strconv.ParseFloat(node.Text, 64)
if err != nil {
return err
}
r.Set(reflect.ValueOf(&v))
case *time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
t, err := time.Parse(ISO8601UTC, node.Text)
if err != nil {
return err
}
r.Set(reflect.ValueOf(&t))
default:
return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
}
return nil
}


@@ -0,0 +1,105 @@
package xmlutil
import (
"encoding/xml"
"io"
"sort"
)
// An XMLNode contains the values to be encoded or decoded.
type XMLNode struct {
Name xml.Name `json:",omitempty"`
Children map[string][]*XMLNode `json:",omitempty"`
Text string `json:",omitempty"`
Attr []xml.Attr `json:",omitempty"`
}
// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
func NewXMLElement(name xml.Name) *XMLNode {
return &XMLNode{
Name: name,
Children: map[string][]*XMLNode{},
Attr: []xml.Attr{},
}
}
// AddChild adds child to the XMLNode.
func (n *XMLNode) AddChild(child *XMLNode) {
if _, ok := n.Children[child.Name.Local]; !ok {
n.Children[child.Name.Local] = []*XMLNode{}
}
n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
}
// XMLToStruct converts an xml.Decoder stream to an XMLNode with nested values.
func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
out := &XMLNode{}
for {
tok, err := d.Token()
if tok == nil || err == io.EOF {
break
}
if err != nil {
return out, err
}
switch typed := tok.(type) {
case xml.CharData:
out.Text = string(typed.Copy())
case xml.StartElement:
el := typed.Copy()
out.Attr = el.Attr
if out.Children == nil {
out.Children = map[string][]*XMLNode{}
}
name := typed.Name.Local
slice := out.Children[name]
if slice == nil {
slice = []*XMLNode{}
}
node, e := XMLToStruct(d, &el)
if e != nil {
return out, e
}
node.Name = typed.Name
slice = append(slice, node)
out.Children[name] = slice
case xml.EndElement:
if s != nil && s.Name.Local == typed.Name.Local { // matching end token
return out, nil
}
}
}
return out, nil
}
// StructToXML writes an XMLNode to an xml.Encoder as tokens.
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
if node.Text != "" {
e.EncodeToken(xml.CharData([]byte(node.Text)))
} else if sorted {
sortedNames := []string{}
for k := range node.Children {
sortedNames = append(sortedNames, k)
}
sort.Strings(sortedNames)
for _, k := range sortedNames {
for _, v := range node.Children[k] {
StructToXML(e, v, sorted)
}
}
} else {
for _, c := range node.Children {
for _, v := range c {
StructToXML(e, v, sorted)
}
}
}
e.EncodeToken(xml.EndElement{Name: node.Name})
return e.Flush()
}
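XMLToStruct is driven by the raw token stream from encoding/xml; this standalone sketch walks the same stream for a tiny document to show which tokens become child nodes, text and end markers.

package main

import (
    "encoding/xml"
    "fmt"
    "io"
    "strings"
)

func main() {
    d := xml.NewDecoder(strings.NewReader("<a><b>hi</b></a>"))
    for {
        tok, err := d.Token()
        if err == io.EOF {
            break
        }
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        switch t := tok.(type) {
        case xml.StartElement:
            fmt.Println("start:", t.Name.Local) // becomes a child XMLNode
        case xml.CharData:
            fmt.Printf("text: %q\n", string(t)) // becomes the node's Text
        case xml.EndElement:
            fmt.Println("end:", t.Name.Local) // closes the current node
        }
    }
}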


@@ -0,0 +1,43 @@
package v4_test
import (
"net/url"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/internal/test/unit"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/stretchr/testify/assert"
)
var _ = unit.Imported
func TestPresignHandler(t *testing.T) {
svc := s3.New(nil)
req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
Bucket: aws.String("bucket"),
Key: aws.String("key"),
ContentDisposition: aws.String("a+b c$d"),
ACL: aws.String("public-read"),
})
req.Time = time.Unix(0, 0)
urlstr, err := req.Presign(5 * time.Minute)
assert.NoError(t, err)
expectedDate := "19700101T000000Z"
expectedHeaders := "host;x-amz-acl"
expectedSig := "7edcb4e3a1bf12f4989018d75acbe3a7f03df24bd6f3112602d59fc551f0e4e2"
expectedCred := "AKID/19700101/mock-region/s3/aws4_request"
u, _ := url.Parse(urlstr)
urlQ := u.Query()
assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature"))
assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential"))
assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))
assert.NotContains(t, urlstr, "+") // + encoded as %20
}


@@ -0,0 +1,364 @@
// Package v4 implements the AWS Signature Version 4 request signer.
package v4
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/internal/protocol/rest"
)
const (
authHeaderPrefix = "AWS4-HMAC-SHA256"
timeFormat = "20060102T150405Z"
shortTimeFormat = "20060102"
)
var ignoredHeaders = map[string]bool{
"Authorization": true,
"Content-Type": true,
"Content-Length": true,
"User-Agent": true,
}
type signer struct {
Request *http.Request
Time time.Time
ExpireTime time.Duration
ServiceName string
Region string
CredValues credentials.Value
Credentials *credentials.Credentials
Query url.Values
Body io.ReadSeeker
Debug aws.LogLevelType
Logger aws.Logger
isPresign bool
formattedTime string
formattedShortTime string
signedHeaders string
canonicalHeaders string
canonicalString string
credentialString string
stringToSign string
signature string
authorization string
}
// Sign signs requests with Signature Version 4.
//
// It signs the request with the service config's Credentials object.
// Signing is skipped if the credentials are the credentials.AnonymousCredentials
// object.
func Sign(req *aws.Request) {
// Skip signing the request when the AnonymousCredentials object is used,
// since such requests do not need to be signed.
if req.Service.Config.Credentials == credentials.AnonymousCredentials {
return
}
region := req.Service.SigningRegion
if region == "" {
region = aws.StringValue(req.Service.Config.Region)
}
name := req.Service.SigningName
if name == "" {
name = req.Service.ServiceName
}
s := signer{
Request: req.HTTPRequest,
Time: req.Time,
ExpireTime: req.ExpireTime,
Query: req.HTTPRequest.URL.Query(),
Body: req.Body,
ServiceName: name,
Region: region,
Credentials: req.Service.Config.Credentials,
Debug: req.Service.Config.LogLevel.Value(),
Logger: req.Service.Config.Logger,
}
req.Error = s.sign()
}
func (v4 *signer) sign() error {
if v4.ExpireTime != 0 {
v4.isPresign = true
}
if v4.isRequestSigned() {
if !v4.Credentials.IsExpired() {
// If the request is already signed, and the credentials have not
// expired yet ignore the signing request.
return nil
}
// The credentials have expired for this request. The current signature
// is invalid, and the request needs to be re-signed or it will fail.
if v4.isPresign {
v4.removePresign()
// Update the request's query string to ensure the values stay in
// sync in case retrieving the new credentials fails.
v4.Request.URL.RawQuery = v4.Query.Encode()
}
}
var err error
v4.CredValues, err = v4.Credentials.Get()
if err != nil {
return err
}
if v4.isPresign {
v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
if v4.CredValues.SessionToken != "" {
v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
} else {
v4.Query.Del("X-Amz-Security-Token")
}
} else if v4.CredValues.SessionToken != "" {
v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
}
v4.build()
if v4.Debug.Matches(aws.LogDebugWithSigning) {
v4.logSigningInfo()
}
return nil
}
const logSignInfoMsg = `DEBUG: Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
func (v4 *signer) logSigningInfo() {
signedURLMsg := ""
if v4.isPresign {
signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
}
msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
v4.Logger.Log(msg)
}
func (v4 *signer) build() {
v4.buildTime() // no depends
v4.buildCredentialString() // no depends
if v4.isPresign {
v4.buildQuery() // no depends
}
v4.buildCanonicalHeaders() // depends on cred string
v4.buildCanonicalString() // depends on canon headers / signed headers
v4.buildStringToSign() // depends on canon string
v4.buildSignature() // depends on string to sign
if v4.isPresign {
v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
} else {
parts := []string{
authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
"SignedHeaders=" + v4.signedHeaders,
"Signature=" + v4.signature,
}
v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
}
}
func (v4 *signer) buildTime() {
v4.formattedTime = v4.Time.UTC().Format(timeFormat)
v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat)
if v4.isPresign {
duration := int64(v4.ExpireTime / time.Second)
v4.Query.Set("X-Amz-Date", v4.formattedTime)
v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
} else {
v4.Request.Header.Set("X-Amz-Date", v4.formattedTime)
}
}
func (v4 *signer) buildCredentialString() {
v4.credentialString = strings.Join([]string{
v4.formattedShortTime,
v4.Region,
v4.ServiceName,
"aws4_request",
}, "/")
if v4.isPresign {
v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString)
}
}
func (v4 *signer) buildQuery() {
for k, h := range v4.Request.Header {
if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") {
continue // never hoist x-amz-* headers, they must be signed
}
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // never hoist ignored headers
}
v4.Request.Header.Del(k)
v4.Query.Del(k)
for _, v := range h {
v4.Query.Add(k, v)
}
}
}
func (v4 *signer) buildCanonicalHeaders() {
var headers []string
headers = append(headers, "host")
for k := range v4.Request.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // ignored header
}
headers = append(headers, strings.ToLower(k))
}
sort.Strings(headers)
v4.signedHeaders = strings.Join(headers, ";")
if v4.isPresign {
v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders)
}
headerValues := make([]string, len(headers))
for i, k := range headers {
if k == "host" {
headerValues[i] = "host:" + v4.Request.URL.Host
} else {
headerValues[i] = k + ":" +
strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",")
}
}
v4.canonicalHeaders = strings.Join(headerValues, "\n")
}
func (v4 *signer) buildCanonicalString() {
v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1)
uri := v4.Request.URL.Opaque
if uri != "" {
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
} else {
uri = v4.Request.URL.Path
}
if uri == "" {
uri = "/"
}
if v4.ServiceName != "s3" {
uri = rest.EscapePath(uri, false)
}
v4.canonicalString = strings.Join([]string{
v4.Request.Method,
uri,
v4.Request.URL.RawQuery,
v4.canonicalHeaders + "\n",
v4.signedHeaders,
v4.bodyDigest(),
}, "\n")
}
func (v4 *signer) buildStringToSign() {
v4.stringToSign = strings.Join([]string{
authHeaderPrefix,
v4.formattedTime,
v4.credentialString,
hex.EncodeToString(makeSha256([]byte(v4.canonicalString))),
}, "\n")
}
func (v4 *signer) buildSignature() {
secret := v4.CredValues.SecretAccessKey
date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime))
region := makeHmac(date, []byte(v4.Region))
service := makeHmac(region, []byte(v4.ServiceName))
credentials := makeHmac(service, []byte("aws4_request"))
signature := makeHmac(credentials, []byte(v4.stringToSign))
v4.signature = hex.EncodeToString(signature)
}
func (v4 *signer) bodyDigest() string {
hash := v4.Request.Header.Get("X-Amz-Content-Sha256")
if hash == "" {
if v4.isPresign && v4.ServiceName == "s3" {
hash = "UNSIGNED-PAYLOAD"
} else if v4.Body == nil {
hash = hex.EncodeToString(makeSha256([]byte{}))
} else {
hash = hex.EncodeToString(makeSha256Reader(v4.Body))
}
v4.Request.Header.Add("X-Amz-Content-Sha256", hash)
}
return hash
}
// isRequestSigned returns whether the request is currently signed or presigned
func (v4 *signer) isRequestSigned() bool {
if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" {
return true
}
if v4.Request.Header.Get("Authorization") != "" {
return true
}
return false
}
// removePresign removes the presigning query string parameters so the request can be presigned again.
func (v4 *signer) removePresign() {
v4.Query.Del("X-Amz-Algorithm")
v4.Query.Del("X-Amz-Signature")
v4.Query.Del("X-Amz-Security-Token")
v4.Query.Del("X-Amz-Date")
v4.Query.Del("X-Amz-Expires")
v4.Query.Del("X-Amz-Credential")
v4.Query.Del("X-Amz-SignedHeaders")
}
func makeHmac(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256Reader(reader io.ReadSeeker) []byte {
hash := sha256.New()
start, _ := reader.Seek(0, 1)
defer reader.Seek(start, 0)
io.Copy(hash, reader)
return hash.Sum(nil)
}
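
For reference, the HMAC chain used by buildSignature above can be reproduced in isolation. The following is a minimal, self-contained sketch with hypothetical inputs (the secret, date, region, service, and string to sign are placeholders, not values taken from this change):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors makeHmac above: HMAC-SHA256 of data keyed with key.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Hypothetical inputs for illustration only.
	secret := "EXAMPLESECRET"
	shortTime := "19700101" // formattedShortTime
	region := "us-east-1"
	service := "dynamodb"
	stringToSign := "AWS4-HMAC-SHA256\n19700101T000000Z\n19700101/us-east-1/dynamodb/aws4_request\n<hex sha256 of canonical request>"

	// Same derivation chain as buildSignature:
	// "AWS4"+secret -> date -> region -> service -> "aws4_request" -> signature.
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(shortTime))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	fmt.Println(hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign))))
}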


@@ -0,0 +1,245 @@
package v4
import (
"net/http"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/stretchr/testify/assert"
)
func buildSigner(serviceName string, region string, signTime time.Time, expireTime time.Duration, body string) signer {
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
reader := strings.NewReader(body)
req, _ := http.NewRequest("POST", endpoint, reader)
req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
req.Header.Add("X-Amz-Target", "prefix.Operation")
req.Header.Add("Content-Type", "application/x-amz-json-1.0")
req.Header.Add("Content-Length", string(len(body)))
req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
return signer{
Request: req,
Time: signTime,
ExpireTime: expireTime,
Query: req.URL.Query(),
Body: reader,
ServiceName: serviceName,
Region: region,
Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
}
}
func removeWS(text string) string {
text = strings.Replace(text, " ", "", -1)
text = strings.Replace(text, "\n", "", -1)
text = strings.Replace(text, "\t", "", -1)
return text
}
func assertEqual(t *testing.T, expected, given string) {
if removeWS(expected) != removeWS(given) {
t.Errorf("\nExpected: %s\nGiven: %s", expected, given)
}
}
func TestPresignRequest(t *testing.T) {
signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 300*time.Second, "{}")
signer.sign()
expectedDate := "19700101T000000Z"
expectedHeaders := "host;x-amz-meta-other-header;x-amz-target"
expectedSig := "5eeedebf6f995145ce56daa02902d10485246d3defb34f97b973c1f40ab82d36"
expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
q := signer.Request.URL.Query()
assert.Equal(t, expectedSig, q.Get("X-Amz-Signature"))
assert.Equal(t, expectedCred, q.Get("X-Amz-Credential"))
assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders"))
assert.Equal(t, expectedDate, q.Get("X-Amz-Date"))
}
func TestSignRequest(t *testing.T) {
signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 0, "{}")
signer.sign()
expectedDate := "19700101T000000Z"
expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=host;x-amz-date;x-amz-meta-other-header;x-amz-security-token;x-amz-target, Signature=69ada33fec48180dab153576e4dd80c4e04124f80dda3eccfed8a67c2b91ed5e"
q := signer.Request.Header
assert.Equal(t, expectedSig, q.Get("Authorization"))
assert.Equal(t, expectedDate, q.Get("X-Amz-Date"))
}
func TestSignEmptyBody(t *testing.T) {
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "")
signer.Body = nil
signer.sign()
hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
assert.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hash)
}
func TestSignBody(t *testing.T) {
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello")
signer.sign()
hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash)
}
func TestSignSeekedBody(t *testing.T) {
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, " hello")
signer.Body.Read(make([]byte, 3)) // consume first 3 bytes so body is now "hello"
signer.sign()
hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash)
start, _ := signer.Body.Seek(0, 1)
assert.Equal(t, int64(3), start)
}
func TestPresignEmptyBodyS3(t *testing.T) {
signer := buildSigner("s3", "us-east-1", time.Now(), 5*time.Minute, "hello")
signer.sign()
hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
assert.Equal(t, "UNSIGNED-PAYLOAD", hash)
}
func TestSignPrecomputedBodyChecksum(t *testing.T) {
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello")
signer.Request.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED")
signer.sign()
hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
assert.Equal(t, "PRECOMPUTED", hash)
}
func TestAnonymousCredentials(t *testing.T) {
r := aws.NewRequest(
aws.NewService(&aws.Config{Credentials: credentials.AnonymousCredentials}),
&aws.Operation{
Name: "BatchGetItem",
HTTPMethod: "POST",
HTTPPath: "/",
},
nil,
nil,
)
Sign(r)
urlQ := r.HTTPRequest.URL.Query()
assert.Empty(t, urlQ.Get("X-Amz-Signature"))
assert.Empty(t, urlQ.Get("X-Amz-Credential"))
assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders"))
assert.Empty(t, urlQ.Get("X-Amz-Date"))
hQ := r.HTTPRequest.Header
assert.Empty(t, hQ.Get("Authorization"))
assert.Empty(t, hQ.Get("X-Amz-Date"))
}
func TestIgnoreResignRequestWithValidCreds(t *testing.T) {
r := aws.NewRequest(
aws.NewService(&aws.Config{
Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
Region: aws.String("us-west-2"),
}),
&aws.Operation{
Name: "BatchGetItem",
HTTPMethod: "POST",
HTTPPath: "/",
},
nil,
nil,
)
Sign(r)
sig := r.HTTPRequest.Header.Get("Authorization")
Sign(r)
assert.Equal(t, sig, r.HTTPRequest.Header.Get("Authorization"))
}
func TestIgnorePreResignRequestWithValidCreds(t *testing.T) {
r := aws.NewRequest(
aws.NewService(&aws.Config{
Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
Region: aws.String("us-west-2"),
}),
&aws.Operation{
Name: "BatchGetItem",
HTTPMethod: "POST",
HTTPPath: "/",
},
nil,
nil,
)
r.ExpireTime = time.Minute * 10
Sign(r)
sig := r.HTTPRequest.Header.Get("X-Amz-Signature")
Sign(r)
assert.Equal(t, sig, r.HTTPRequest.Header.Get("X-Amz-Signature"))
}
func TestResignRequestExpiredCreds(t *testing.T) {
creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
r := aws.NewRequest(
aws.NewService(&aws.Config{Credentials: creds}),
&aws.Operation{
Name: "BatchGetItem",
HTTPMethod: "POST",
HTTPPath: "/",
},
nil,
nil,
)
Sign(r)
querySig := r.HTTPRequest.Header.Get("Authorization")
creds.Expire()
Sign(r)
assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization"))
}
func TestPreResignRequestExpiredCreds(t *testing.T) {
provider := &credentials.StaticProvider{credentials.Value{"AKID", "SECRET", "SESSION"}}
creds := credentials.NewCredentials(provider)
r := aws.NewRequest(
aws.NewService(&aws.Config{Credentials: creds}),
&aws.Operation{
Name: "BatchGetItem",
HTTPMethod: "POST",
HTTPPath: "/",
},
nil,
nil,
)
r.ExpireTime = time.Minute * 10
Sign(r)
querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature")
creds.Expire()
r.Time = time.Now().Add(time.Hour * 48)
Sign(r)
assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature"))
}
func BenchmarkPresignRequest(b *testing.B) {
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 300*time.Second, "{}")
for i := 0; i < b.N; i++ {
signer.sign()
}
}
func BenchmarkSignRequest(b *testing.B) {
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "{}")
for i := 0; i < b.N; i++ {
signer.sign()
}
}


@@ -0,0 +1,65 @@
// Package endpoints validates regional endpoints for services.
package endpoints
//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
//go:generate gofmt -s -w endpoints_map.go
import (
"fmt"
"regexp"
"strings"
)
// NormalizeEndpoint takes an endpoint and service API information and returns a
// normalized endpoint and signing region. If the endpoint is an empty string,
// the service name and region are used to look up the service's API endpoint.
// If an endpoint is provided, only the scheme is added when it is not present.
func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
if endpoint == "" {
return EndpointForRegion(serviceName, region, disableSSL)
}
return AddScheme(endpoint, disableSSL), ""
}
// EndpointForRegion returns an endpoint and its signing region for a service and region.
// If the service and region pair is not found, endpoint and signingRegion will be empty.
func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
derivedKeys := []string{
region + "/" + svcName,
region + "/*",
"*/" + svcName,
"*/*",
}
for _, key := range derivedKeys {
if val, ok := endpointsMap.Endpoints[key]; ok {
ep := val.Endpoint
ep = strings.Replace(ep, "{region}", region, -1)
ep = strings.Replace(ep, "{service}", svcName, -1)
endpoint = ep
signingRegion = val.SigningRegion
break
}
}
return AddScheme(endpoint, disableSSL), signingRegion
}
// Regular expression to determine if the endpoint string is prefixed with a scheme.
var schemeRE = regexp.MustCompile("^([^:]+)://")
// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
// scheme. If disableSSL is true, HTTP is added instead of the default HTTPS.
func AddScheme(endpoint string, disableSSL bool) string {
if endpoint != "" && !schemeRE.MatchString(endpoint) {
scheme := "https"
if disableSSL {
scheme = "http"
}
endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
}
return endpoint
}
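
A rough usage sketch (not part of this diff). It assumes the vendored import path shown in the test file further down; the expected values follow from the lookup order in EndpointForRegion together with the generated endpoint map below:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/endpoints"
)

func main() {
	// "us-east-1/s3" has an explicit entry, so the region-specific endpoint wins.
	ep, sr := endpoints.EndpointForRegion("s3", "us-east-1", false)
	fmt.Println(ep, sr) // https://s3.amazonaws.com  (empty signing region)

	// "route53" only matches the global "*/route53" entry, which pins signing to us-east-1.
	ep, sr = endpoints.EndpointForRegion("route53", "eu-west-1", false)
	fmt.Println(ep, sr) // https://route53.amazonaws.com us-east-1

	// A caller-supplied endpoint short-circuits the lookup; only the scheme is added.
	ep, sr = endpoints.NormalizeEndpoint("localhost:8000", "dynamodb", "us-east-1", true)
	fmt.Println(ep, sr) // http://localhost:8000  (empty signing region)
}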


@@ -0,0 +1,89 @@
{
"version": 2,
"endpoints": {
"*/*": {
"endpoint": "{service}.{region}.amazonaws.com"
},
"cn-north-1/*": {
"endpoint": "{service}.{region}.amazonaws.com.cn",
"signatureVersion": "v4"
},
"us-gov-west-1/iam": {
"endpoint": "iam.us-gov.amazonaws.com"
},
"us-gov-west-1/sts": {
"endpoint": "sts.us-gov-west-1.amazonaws.com"
},
"us-gov-west-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"*/cloudfront": {
"endpoint": "cloudfront.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/cloudsearchdomain": {
"endpoint": "",
"signingRegion": "us-east-1"
},
"*/data.iot": {
"endpoint": "",
"signingRegion": "us-east-1"
},
"*/ec2metadata": {
"endpoint": "http://169.254.169.254/latest",
"signingRegion": "us-east-1"
},
"*/iam": {
"endpoint": "iam.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/importexport": {
"endpoint": "importexport.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/route53": {
"endpoint": "route53.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/sts": {
"endpoint": "sts.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/waf": {
"endpoint": "waf.amazonaws.com",
"signingRegion": "us-east-1"
},
"us-east-1/sdb": {
"endpoint": "sdb.amazonaws.com",
"signingRegion": "us-east-1"
},
"us-east-1/s3": {
"endpoint": "s3.amazonaws.com"
},
"us-west-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"us-west-2/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"eu-west-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"ap-southeast-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"ap-southeast-2/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"ap-northeast-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"sa-east-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"eu-central-1/s3": {
"endpoint": "{service}.{region}.amazonaws.com",
"signatureVersion": "v4"
}
}
}


@@ -0,0 +1,101 @@
package endpoints
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
type endpointStruct struct {
Version int
Endpoints map[string]endpointEntry
}
type endpointEntry struct {
Endpoint string
SigningRegion string
}
var endpointsMap = endpointStruct{
Version: 2,
Endpoints: map[string]endpointEntry{
"*/*": {
Endpoint: "{service}.{region}.amazonaws.com",
},
"*/cloudfront": {
Endpoint: "cloudfront.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/cloudsearchdomain": {
Endpoint: "",
SigningRegion: "us-east-1",
},
"*/data.iot": {
Endpoint: "",
SigningRegion: "us-east-1",
},
"*/ec2metadata": {
Endpoint: "http://169.254.169.254/latest",
SigningRegion: "us-east-1",
},
"*/iam": {
Endpoint: "iam.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/importexport": {
Endpoint: "importexport.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/route53": {
Endpoint: "route53.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/sts": {
Endpoint: "sts.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/waf": {
Endpoint: "waf.amazonaws.com",
SigningRegion: "us-east-1",
},
"ap-northeast-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"ap-southeast-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"ap-southeast-2/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"cn-north-1/*": {
Endpoint: "{service}.{region}.amazonaws.com.cn",
},
"eu-central-1/s3": {
Endpoint: "{service}.{region}.amazonaws.com",
},
"eu-west-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"sa-east-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"us-east-1/s3": {
Endpoint: "s3.amazonaws.com",
},
"us-east-1/sdb": {
Endpoint: "sdb.amazonaws.com",
SigningRegion: "us-east-1",
},
"us-gov-west-1/iam": {
Endpoint: "iam.us-gov.amazonaws.com",
},
"us-gov-west-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"us-gov-west-1/sts": {
Endpoint: "sts.us-gov-west-1.amazonaws.com",
},
"us-west-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"us-west-2/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
},
}


@@ -0,0 +1,41 @@
package endpoints_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/private/endpoints"
)
func TestGenericEndpoint(t *testing.T) {
name := "service"
region := "mock-region-1"
ep, sr := endpoints.EndpointForRegion(name, region, false)
assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com", name, region), ep)
assert.Empty(t, sr)
}
func TestGlobalEndpoints(t *testing.T) {
region := "mock-region-1"
svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "waf"}
for _, name := range svcs {
ep, sr := endpoints.EndpointForRegion(name, region, false)
assert.Equal(t, fmt.Sprintf("https://%s.amazonaws.com", name), ep)
assert.Equal(t, "us-east-1", sr)
}
}
func TestServicesInCN(t *testing.T) {
region := "cn-north-1"
svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3", "waf"}
for _, name := range svcs {
ep, sr := endpoints.EndpointForRegion(name, region, false)
assert.Equal(t, fmt.Sprintf("https://%s.%s.amazonaws.com.cn", name, region), ep)
assert.Empty(t, sr)
}
}


@@ -0,0 +1,32 @@
// Package ec2query provides serialisation of AWS EC2 requests and responses.
package ec2query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go
import (
"net/url"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)
// Build builds a request for the EC2 protocol.
func Build(r *request.Request) {
body := url.Values{
"Action": {r.Operation.Name},
"Version": {r.ClientInfo.APIVersion},
}
if err := queryutil.Parse(body, r.Params, true); err != nil {
r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err)
}
if r.ExpireTime == 0 {
r.HTTPRequest.Method = "POST"
r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
r.SetBufferBody([]byte(body.Encode()))
} else { // This is a pre-signed request
r.HTTPRequest.Method = "GET"
r.HTTPRequest.URL.RawQuery = body.Encode()
}
}


@@ -0,0 +1,85 @@
// +build bench
package ec2query_test
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting"
"github.com/aws/aws-sdk-go/private/protocol/ec2query"
"github.com/aws/aws-sdk-go/service/ec2"
)
func BenchmarkEC2QueryBuild_Complex_ec2AuthorizeSecurityGroupEgress(b *testing.B) {
params := &ec2.AuthorizeSecurityGroupEgressInput{
GroupId: aws.String("String"), // Required
CidrIp: aws.String("String"),
DryRun: aws.Bool(true),
FromPort: aws.Int64(1),
IpPermissions: []*ec2.IpPermission{
{ // Required
FromPort: aws.Int64(1),
IpProtocol: aws.String("String"),
IpRanges: []*ec2.IpRange{
{ // Required
CidrIp: aws.String("String"),
},
// More values...
},
PrefixListIds: []*ec2.PrefixListId{
{ // Required
PrefixListId: aws.String("String"),
},
// More values...
},
ToPort: aws.Int64(1),
UserIdGroupPairs: []*ec2.UserIdGroupPair{
{ // Required
GroupId: aws.String("String"),
GroupName: aws.String("String"),
UserId: aws.String("String"),
},
// More values...
},
},
// More values...
},
IpProtocol: aws.String("String"),
SourceSecurityGroupName: aws.String("String"),
SourceSecurityGroupOwnerId: aws.String("String"),
ToPort: aws.Int64(1),
}
benchEC2QueryBuild(b, "AuthorizeSecurityGroupEgress", params)
}
func BenchmarkEC2QueryBuild_Simple_ec2AttachNetworkInterface(b *testing.B) {
params := &ec2.AttachNetworkInterfaceInput{
DeviceIndex: aws.Int64(1), // Required
InstanceId: aws.String("String"), // Required
NetworkInterfaceId: aws.String("String"), // Required
DryRun: aws.Bool(true),
}
benchEC2QueryBuild(b, "AttachNetworkInterface", params)
}
func benchEC2QueryBuild(b *testing.B, opName string, params interface{}) {
svc := awstesting.NewClient()
svc.ServiceName = "ec2"
svc.APIVersion = "2015-04-15"
for i := 0; i < b.N; i++ {
r := svc.NewRequest(&request.Operation{
Name: opName,
HTTPMethod: "POST",
HTTPPath: "/",
}, params, nil)
ec2query.Build(r)
if r.Error != nil {
b.Fatal("Unexpected error", r.Error)
}
}
}

File diff suppressed because it is too large

@@ -0,0 +1,54 @@
package ec2query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go
import (
"encoding/xml"
"io"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)
// Unmarshal unmarshals a response body for the EC2 protocol.
func Unmarshal(r *request.Request) {
defer r.HTTPResponse.Body.Close()
if r.DataFilled() {
decoder := xml.NewDecoder(r.HTTPResponse.Body)
err := xmlutil.UnmarshalXML(r.Data, decoder, "")
if err != nil {
r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err)
return
}
}
}
// UnmarshalMeta unmarshals response headers for the EC2 protocol.
func UnmarshalMeta(r *request.Request) {
// TODO implement unmarshaling of request IDs
}
type xmlErrorResponse struct {
XMLName xml.Name `xml:"Response"`
Code string `xml:"Errors>Error>Code"`
Message string `xml:"Errors>Error>Message"`
RequestID string `xml:"RequestId"`
}
// UnmarshalError unmarshals a response error for the EC2 protocol.
func UnmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
resp := &xmlErrorResponse{}
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
if err != nil && err != io.EOF {
r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err)
} else {
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
r.HTTPResponse.StatusCode,
resp.RequestID,
)
}
}
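
The xmlErrorResponse tags rely on encoding/xml's nested-element syntax (Errors>Error>Code). A standalone sketch with a hypothetical EC2-style error body shows what UnmarshalError ends up extracting:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type xmlErrorResponse struct {
	XMLName   xml.Name `xml:"Response"`
	Code      string   `xml:"Errors>Error>Code"`
	Message   string   `xml:"Errors>Error>Message"`
	RequestID string   `xml:"RequestId"`
}

func main() {
	// Hypothetical EC2-style error body for illustration only.
	body := `<Response>
  <Errors><Error><Code>InvalidInstanceID.NotFound</Code><Message>The instance ID does not exist</Message></Error></Errors>
  <RequestId>ea966190-f9aa-478e-9ede-example</RequestId>
</Response>`

	resp := &xmlErrorResponse{}
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(resp); err != nil {
		panic(err)
	}
	// These become the code, message, and request ID of the awserr built above.
	fmt.Println(resp.Code, resp.RequestID)
}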

File diff suppressed because it is too large

@@ -0,0 +1,33 @@
// Package query provides serialisation of AWS Query requests and responses.
package query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
import (
"net/url"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)
// Build builds a request for an AWS Query service.
func Build(r *request.Request) {
body := url.Values{
"Action": {r.Operation.Name},
"Version": {r.ClientInfo.APIVersion},
}
if err := queryutil.Parse(body, r.Params, false); err != nil {
r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
return
}
if r.ExpireTime == 0 {
r.HTTPRequest.Method = "POST"
r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
r.SetBufferBody([]byte(body.Encode()))
} else { // This is a pre-signed request
r.HTTPRequest.Method = "GET"
r.HTTPRequest.URL.RawQuery = body.Encode()
}
}

File diff suppressed because it is too large

@@ -0,0 +1,223 @@
package queryutil
import (
"encoding/base64"
"fmt"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
// Parse parses an object i and fills a url.Values object. The isEC2 flag
// indicates if this is the EC2 Query sub-protocol.
func Parse(body url.Values, i interface{}, isEC2 bool) error {
q := queryParser{isEC2: isEC2}
return q.parseValue(body, reflect.ValueOf(i), "", "")
}
func elemOf(value reflect.Value) reflect.Value {
for value.Kind() == reflect.Ptr {
value = value.Elem()
}
return value
}
type queryParser struct {
isEC2 bool
}
func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
value = elemOf(value)
// no need to handle zero values
if !value.IsValid() {
return nil
}
t := tag.Get("type")
if t == "" {
switch value.Kind() {
case reflect.Struct:
t = "structure"
case reflect.Slice:
t = "list"
case reflect.Map:
t = "map"
}
}
switch t {
case "structure":
return q.parseStruct(v, value, prefix)
case "list":
return q.parseList(v, value, prefix, tag)
case "map":
return q.parseMap(v, value, prefix, tag)
default:
return q.parseScalar(v, value, prefix, tag)
}
}
func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
if !value.IsValid() {
return nil
}
t := value.Type()
for i := 0; i < value.NumField(); i++ {
if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c {
continue // ignore unexported fields
}
elemValue := elemOf(value.Field(i))
field := t.Field(i)
var name string
if q.isEC2 {
name = field.Tag.Get("queryName")
}
if name == "" {
if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
name = field.Tag.Get("locationNameList")
} else if locName := field.Tag.Get("locationName"); locName != "" {
name = locName
}
if name != "" && q.isEC2 {
name = strings.ToUpper(name[0:1]) + name[1:]
}
}
if name == "" {
name = field.Name
}
if prefix != "" {
name = prefix + "." + name
}
if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
return err
}
}
return nil
}
func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
// If it's empty, generate an empty value
if !value.IsNil() && value.Len() == 0 {
v.Set(prefix, "")
return nil
}
// check for unflattened list member
if !q.isEC2 && tag.Get("flattened") == "" {
prefix += ".member"
}
for i := 0; i < value.Len(); i++ {
slicePrefix := prefix
if slicePrefix == "" {
slicePrefix = strconv.Itoa(i + 1)
} else {
slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
}
if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
return err
}
}
return nil
}
func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
// If it's empty, generate an empty value
if !value.IsNil() && value.Len() == 0 {
v.Set(prefix, "")
return nil
}
// check for unflattened map entry
if !q.isEC2 && tag.Get("flattened") == "" {
prefix += ".entry"
}
// Sort keys for improved serialization consistency;
// this is not strictly necessary for protocol support.
mapKeyValues := value.MapKeys()
mapKeys := map[string]reflect.Value{}
mapKeyNames := make([]string, len(mapKeyValues))
for i, mapKey := range mapKeyValues {
name := mapKey.String()
mapKeys[name] = mapKey
mapKeyNames[i] = name
}
sort.Strings(mapKeyNames)
for i, mapKeyName := range mapKeyNames {
mapKey := mapKeys[mapKeyName]
mapValue := value.MapIndex(mapKey)
kname := tag.Get("locationNameKey")
if kname == "" {
kname = "key"
}
vname := tag.Get("locationNameValue")
if vname == "" {
vname = "value"
}
// serialize key
var keyName string
if prefix == "" {
keyName = strconv.Itoa(i+1) + "." + kname
} else {
keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
}
if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
return err
}
// serialize value
var valueName string
if prefix == "" {
valueName = strconv.Itoa(i+1) + "." + vname
} else {
valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
}
if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
return err
}
}
return nil
}
func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
switch value := r.Interface().(type) {
case string:
v.Set(name, value)
case []byte:
if !r.IsNil() {
v.Set(name, base64.StdEncoding.EncodeToString(value))
}
case bool:
v.Set(name, strconv.FormatBool(value))
case int64:
v.Set(name, strconv.FormatInt(value, 10))
case int:
v.Set(name, strconv.Itoa(value))
case float64:
v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
case float32:
v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
case time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
v.Set(name, value.UTC().Format(ISO8601UTC))
default:
return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
}
return nil
}
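
A small sketch of how Parse flattens a tagged struct into query parameters. The input struct is hypothetical (tagged the way generated API shapes are), and the import path matches the one used by the query builders above:

package main

import (
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

// listInput is a hypothetical shape for illustration only.
type listInput struct {
	Marker   *string   `locationName:"Marker" type:"string"`
	MaxItems *int64    `locationName:"MaxItems" type:"integer"`
	Names    []*string `locationName:"Names" type:"list"`
}

func main() {
	max := int64(10)
	a, b := "alpha", "beta"
	in := &listInput{MaxItems: &max, Names: []*string{&a, &b}}

	body := url.Values{"Action": {"ListThings"}, "Version": {"2015-01-01"}}
	if err := queryutil.Parse(body, in, false); err != nil {
		panic(err)
	}
	// Nil fields are skipped, and non-flattened lists gain the ".member" infix:
	// Action=ListThings&MaxItems=10&Names.member.1=alpha&Names.member.2=beta&Version=2015-01-01
	fmt.Println(body.Encode())
}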


@@ -0,0 +1,29 @@
package query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
import (
"encoding/xml"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)
// Unmarshal unmarshals a response for an AWS Query service.
func Unmarshal(r *request.Request) {
defer r.HTTPResponse.Body.Close()
if r.DataFilled() {
decoder := xml.NewDecoder(r.HTTPResponse.Body)
err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
if err != nil {
r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
return
}
}
}
// UnmarshalMeta unmarshals header response values for an AWS Query service.
func UnmarshalMeta(r *request.Request) {
// TODO implement unmarshaling of request IDs
}


@@ -0,0 +1,33 @@
package query
import (
"encoding/xml"
"io"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
type xmlErrorResponse struct {
XMLName xml.Name `xml:"ErrorResponse"`
Code string `xml:"Error>Code"`
Message string `xml:"Error>Message"`
RequestID string `xml:"RequestId"`
}
// UnmarshalError unmarshals an error response for an AWS Query service.
func UnmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
resp := &xmlErrorResponse{}
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
if err != nil && err != io.EOF {
r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
} else {
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
r.HTTPResponse.StatusCode,
resp.RequestID,
)
}
}

File diff suppressed because it is too large

@@ -0,0 +1,254 @@
// Package rest provides RESTful serialization of AWS requests and responses.
package rest
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"net/http"
"net/url"
"path"
"reflect"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// RFC822 is the RFC 822 timestamp format used by AWS protocols
const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
// Whether the byte value can be sent without escaping in AWS URLs
var noEscape [256]bool
var errValueNotSet = fmt.Errorf("value not set")
func init() {
for i := 0; i < len(noEscape); i++ {
// AWS expects every character except these to be escaped
noEscape[i] = (i >= 'A' && i <= 'Z') ||
(i >= 'a' && i <= 'z') ||
(i >= '0' && i <= '9') ||
i == '-' ||
i == '.' ||
i == '_' ||
i == '~'
}
}
// Build builds the REST component of a service request.
func Build(r *request.Request) {
if r.ParamsFilled() {
v := reflect.ValueOf(r.Params).Elem()
buildLocationElements(r, v)
buildBody(r, v)
}
}
func buildLocationElements(r *request.Request, v reflect.Value) {
query := r.HTTPRequest.URL.Query()
for i := 0; i < v.NumField(); i++ {
m := v.Field(i)
if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
continue
}
if m.IsValid() {
field := v.Type().Field(i)
name := field.Tag.Get("locationName")
if name == "" {
name = field.Name
}
if m.Kind() == reflect.Ptr {
m = m.Elem()
}
if !m.IsValid() {
continue
}
var err error
switch field.Tag.Get("location") {
case "headers": // header maps
err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName"))
case "header":
err = buildHeader(&r.HTTPRequest.Header, m, name)
case "uri":
err = buildURI(r.HTTPRequest.URL, m, name)
case "querystring":
err = buildQueryString(query, m, name)
}
r.Error = err
}
if r.Error != nil {
return
}
}
r.HTTPRequest.URL.RawQuery = query.Encode()
updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path)
}
func buildBody(r *request.Request, v reflect.Value) {
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
pfield, _ := v.Type().FieldByName(payloadName)
if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
payload := reflect.Indirect(v.FieldByName(payloadName))
if payload.IsValid() && payload.Interface() != nil {
switch reader := payload.Interface().(type) {
case io.ReadSeeker:
r.SetReaderBody(reader)
case []byte:
r.SetBufferBody(reader)
case string:
r.SetStringBody(reader)
default:
r.Error = awserr.New("SerializationError",
"failed to encode REST request",
fmt.Errorf("unknown payload type %s", payload.Type()))
}
}
}
}
}
}
func buildHeader(header *http.Header, v reflect.Value, name string) error {
str, err := convertType(v)
if err == errValueNotSet {
return nil
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
header.Add(name, str)
return nil
}
func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error {
for _, key := range v.MapKeys() {
str, err := convertType(v.MapIndex(key))
if err == errValueNotSet {
continue
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
header.Add(prefix+key.String(), str)
}
return nil
}
func buildURI(u *url.URL, v reflect.Value, name string) error {
value, err := convertType(v)
if err == errValueNotSet {
return nil
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
uri := u.Path
uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1)
uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1)
u.Path = uri
return nil
}
func buildQueryString(query url.Values, v reflect.Value, name string) error {
switch value := v.Interface().(type) {
case []*string:
for _, item := range value {
query.Add(name, *item)
}
case map[string]*string:
for key, item := range value {
query.Add(key, *item)
}
case map[string][]*string:
for key, items := range value {
for _, item := range items {
query.Add(key, *item)
}
}
default:
str, err := convertType(v)
if err == errValueNotSet {
return nil
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
query.Set(name, str)
}
return nil
}
func updatePath(url *url.URL, urlPath string) {
scheme, query := url.Scheme, url.RawQuery
hasSlash := strings.HasSuffix(urlPath, "/")
// clean up path
urlPath = path.Clean(urlPath)
if hasSlash && !strings.HasSuffix(urlPath, "/") {
urlPath += "/"
}
// get formatted URL minus scheme so we can build this into Opaque
url.Scheme, url.Path, url.RawQuery = "", "", ""
s := url.String()
url.Scheme = scheme
url.RawQuery = query
// build opaque URI
url.Opaque = s + urlPath
}
// EscapePath escapes part of a URL path in Amazon style
func EscapePath(path string, encodeSep bool) string {
var buf bytes.Buffer
for i := 0; i < len(path); i++ {
c := path[i]
if noEscape[c] || (c == '/' && !encodeSep) {
buf.WriteByte(c)
} else {
buf.WriteByte('%')
buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
}
}
return buf.String()
}
func convertType(v reflect.Value) (string, error) {
v = reflect.Indirect(v)
if !v.IsValid() {
return "", errValueNotSet
}
var str string
switch value := v.Interface().(type) {
case string:
str = value
case []byte:
str = base64.StdEncoding.EncodeToString(value)
case bool:
str = strconv.FormatBool(value)
case int64:
str = strconv.FormatInt(value, 10)
case float64:
str = strconv.FormatFloat(value, 'f', -1, 64)
case time.Time:
str = value.UTC().Format(RFC822)
default:
err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
return "", err
}
return str, nil
}
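
EscapePath's two modes matter for S3-style keys: the greedy {name+} URI placeholders keep path separators, while plain {name} placeholders escape them (see buildURI above). A quick sketch, assuming the private/protocol/rest import path used elsewhere in this vendored tree:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

func main() {
	key := "photos/2015/cat picture.jpg"

	// encodeSep=false (used for "{name+}" placeholders): slashes survive.
	fmt.Println(rest.EscapePath(key, false)) // photos/2015/cat%20picture.jpg

	// encodeSep=true (used for plain "{name}" placeholders): slashes are escaped too.
	fmt.Println(rest.EscapePath(key, true)) // photos%2F2015%2Fcat%20picture.jpg
}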


@@ -0,0 +1,45 @@
package rest
import "reflect"
// PayloadMember returns the payload field member of i if there is one, or nil.
func PayloadMember(i interface{}) interface{} {
if i == nil {
return nil
}
v := reflect.ValueOf(i).Elem()
if !v.IsValid() {
return nil
}
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
field, _ := v.Type().FieldByName(payloadName)
if field.Tag.Get("type") != "structure" {
return nil
}
payload := v.FieldByName(payloadName)
if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
return payload.Interface()
}
}
}
return nil
}
// PayloadType returns the type of a payload field member of i if there is one, or "".
func PayloadType(i interface{}) string {
v := reflect.Indirect(reflect.ValueOf(i))
if !v.IsValid() {
return ""
}
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
if member, ok := v.Type().FieldByName(payloadName); ok {
return member.Tag.Get("type")
}
}
}
return ""
}
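
A sketch of how the SDKShapeTraits payload tag drives these helpers. The input struct is hypothetical, shaped like the generated API structs that buildBody in the rest builder above expects:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

// putObjectLikeInput is a hypothetical shape for illustration only.
type putObjectLikeInput struct {
	Body io.ReadSeeker `type:"blob"`

	SDKShapeTraits bool `type:"structure" payload:"Body"`
}

func main() {
	in := &putObjectLikeInput{Body: strings.NewReader("hello")}

	// The payload member's type tag ("blob") tells the REST builder to stream
	// the field as the request body rather than marshal it as a structure.
	fmt.Println(rest.PayloadType(in)) // blob

	// PayloadMember only returns structure payloads, so a blob payload yields nil here.
	fmt.Println(rest.PayloadMember(in)) // <nil>
}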

Some files were not shown because too many files have changed in this diff