From d61931341176dad9ccff7c967a10d88fe54218fa Mon Sep 17 00:00:00 2001 From: Toshiaki Takahashi Date: Thu, 6 Sep 2018 09:04:29 +0000 Subject: src: Add DMA localagent Change-Id: Ibcee814fbc9a904448eeb368a1a26bbb69cf54aa Signed-off-by: Toshiaki Takahashi --- src/dma/Gopkg.lock | 133 + src/dma/Gopkg.toml | 51 + src/dma/cmd/infofetch/daemon.go | 103 + src/dma/cmd/infofetch/openstack.go | 522 +++ src/dma/cmd/infofetch/virsh_domain.go | 264 ++ src/dma/cmd/server/agent.go | 31 + src/dma/cmd/server/amqp.go | 108 + src/dma/cmd/server/api.go | 85 + src/dma/cmd/server/main.go | 85 + src/dma/cmd/threshold/evaluate.go | 37 + src/dma/cmd/threshold/main.go | 107 + src/dma/cmd/threshold/read.go | 82 + src/dma/cmd/threshold/transmit.go | 97 + src/dma/examples/config.toml | 42 + src/dma/pkg/common/redispool.go | 72 + src/dma/pkg/common/types.go | 29 + .../sampleconf/demo-memory-linearreg/analysis.conf | 12 + .../demo-memory-linearreg/evaluator.conf | 35 + .../demo-memory-linearreg/read-metrics.conf | 7 + .../demo-memory-linearreg/transmitter.conf | 11 + .../demo-memory-linearreg/write-redis.conf | 10 + src/dma/sampleconf/simple-threshold/evaluator.conf | 7 + .../sampleconf/simple-threshold/read-metrics.conf | 17 + .../sampleconf/simple-threshold/transmitter.conf | 11 + .../sampleconf/simple-threshold/write-redis.conf | 10 + .../vendor/github.com/BurntSushi/toml/.gitignore | 5 + .../vendor/github.com/BurntSushi/toml/.travis.yml | 15 + .../vendor/github.com/BurntSushi/toml/COMPATIBLE | 3 + src/dma/vendor/github.com/BurntSushi/toml/COPYING | 14 + src/dma/vendor/github.com/BurntSushi/toml/Makefile | 19 + .../vendor/github.com/BurntSushi/toml/README.md | 218 + .../BurntSushi/toml/cmd/toml-test-decoder/COPYING | 14 + .../BurntSushi/toml/cmd/toml-test-encoder/COPYING | 14 + .../github.com/BurntSushi/toml/cmd/tomlv/COPYING | 14 + .../vendor/github.com/BurntSushi/toml/decode.go | 509 +++ .../github.com/BurntSushi/toml/decode_meta.go | 121 + src/dma/vendor/github.com/BurntSushi/toml/doc.go | 27 + .../vendor/github.com/BurntSushi/toml/encode.go | 568 +++ .../github.com/BurntSushi/toml/encoding_types.go | 19 + .../BurntSushi/toml/encoding_types_1.1.go | 18 + src/dma/vendor/github.com/BurntSushi/toml/lex.go | 953 ++++ src/dma/vendor/github.com/BurntSushi/toml/parse.go | 592 +++ .../vendor/github.com/BurntSushi/toml/session.vim | 1 + .../github.com/BurntSushi/toml/type_check.go | 91 + .../github.com/BurntSushi/toml/type_fields.go | 242 + .../vendor/github.com/go-redis/redis/.gitignore | 2 + .../vendor/github.com/go-redis/redis/.travis.yml | 20 + .../vendor/github.com/go-redis/redis/CHANGELOG.md | 12 + src/dma/vendor/github.com/go-redis/redis/LICENSE | 25 + src/dma/vendor/github.com/go-redis/redis/Makefile | 20 + src/dma/vendor/github.com/go-redis/redis/README.md | 146 + .../vendor/github.com/go-redis/redis/cluster.go | 1624 +++++++ .../github.com/go-redis/redis/cluster_commands.go | 22 + .../vendor/github.com/go-redis/redis/command.go | 1225 +++++ .../vendor/github.com/go-redis/redis/commands.go | 2337 ++++++++++ src/dma/vendor/github.com/go-redis/redis/doc.go | 4 + .../internal/consistenthash/consistenthash.go | 81 + .../github.com/go-redis/redis/internal/error.go | 83 + .../go-redis/redis/internal/hashtag/hashtag.go | 77 + .../github.com/go-redis/redis/internal/internal.go | 24 + .../github.com/go-redis/redis/internal/log.go | 15 + .../github.com/go-redis/redis/internal/once.go | 60 + .../go-redis/redis/internal/pool/conn.go | 80 + .../go-redis/redis/internal/pool/pool.go | 416 ++ 
.../go-redis/redis/internal/pool/pool_single.go | 53 + .../go-redis/redis/internal/pool/pool_sticky.go | 109 + .../go-redis/redis/internal/proto/reader.go | 316 ++ .../go-redis/redis/internal/proto/scan.go | 166 + .../go-redis/redis/internal/proto/write_buffer.go | 113 + .../redis/internal/singleflight/singleflight.go | 64 + .../github.com/go-redis/redis/internal/util.go | 29 + .../go-redis/redis/internal/util/safe.go | 7 + .../go-redis/redis/internal/util/strconv.go | 19 + .../go-redis/redis/internal/util/unsafe.go | 12 + .../vendor/github.com/go-redis/redis/iterator.go | 73 + .../vendor/github.com/go-redis/redis/options.go | 203 + src/dma/vendor/github.com/go-redis/redis/parser.go | 394 ++ .../vendor/github.com/go-redis/redis/pipeline.go | 113 + src/dma/vendor/github.com/go-redis/redis/pubsub.go | 460 ++ src/dma/vendor/github.com/go-redis/redis/redis.go | 501 ++ src/dma/vendor/github.com/go-redis/redis/result.go | 140 + src/dma/vendor/github.com/go-redis/redis/ring.go | 622 +++ src/dma/vendor/github.com/go-redis/redis/script.go | 62 + .../vendor/github.com/go-redis/redis/sentinel.go | 343 ++ src/dma/vendor/github.com/go-redis/redis/tx.go | 110 + .../vendor/github.com/go-redis/redis/universal.go | 151 + .../vendor/github.com/labstack/echo/.editorconfig | 25 + .../vendor/github.com/labstack/echo/.gitattributes | 20 + src/dma/vendor/github.com/labstack/echo/.gitignore | 7 + .../vendor/github.com/labstack/echo/.travis.yml | 14 + src/dma/vendor/github.com/labstack/echo/Gopkg.lock | 75 + src/dma/vendor/github.com/labstack/echo/Gopkg.toml | 42 + src/dma/vendor/github.com/labstack/echo/LICENSE | 21 + src/dma/vendor/github.com/labstack/echo/Makefile | 17 + src/dma/vendor/github.com/labstack/echo/README.md | 99 + src/dma/vendor/github.com/labstack/echo/bind.go | 261 ++ src/dma/vendor/github.com/labstack/echo/context.go | 576 +++ src/dma/vendor/github.com/labstack/echo/echo.go | 781 ++++ src/dma/vendor/github.com/labstack/echo/group.go | 120 + src/dma/vendor/github.com/labstack/echo/log.go | 40 + .../vendor/github.com/labstack/echo/response.go | 110 + src/dma/vendor/github.com/labstack/echo/router.go | 434 ++ src/dma/vendor/github.com/labstack/gommon/LICENSE | 22 + .../github.com/labstack/gommon/color/README.md | 86 + .../github.com/labstack/gommon/color/color.go | 407 ++ .../github.com/labstack/gommon/log/README.md | 5 + .../vendor/github.com/labstack/gommon/log/color.go | 13 + .../vendor/github.com/labstack/gommon/log/log.go | 415 ++ .../vendor/github.com/labstack/gommon/log/white.go | 12 + .../github.com/libvirt/libvirt-go/.gitignore | 7 + .../github.com/libvirt/libvirt-go/.gitpublish | 4 + .../github.com/libvirt/libvirt-go/.travis.yml | 42 + .../vendor/github.com/libvirt/libvirt-go/FAQ.md | 19 + .../vendor/github.com/libvirt/libvirt-go/LICENSE | 21 + .../vendor/github.com/libvirt/libvirt-go/README.md | 136 + .../github.com/libvirt/libvirt-go/callbacks.go | 102 + .../libvirt/libvirt-go/callbacks_wrapper.go | 39 + .../libvirt/libvirt-go/callbacks_wrapper.h | 32 + .../github.com/libvirt/libvirt-go/connect.go | 2998 ++++++++++++ .../github.com/libvirt/libvirt-go/connect_compat.h | 200 + .../libvirt/libvirt-go/connect_wrapper.go | 1766 ++++++++ .../libvirt/libvirt-go/connect_wrapper.h | 730 +++ .../vendor/github.com/libvirt/libvirt-go/doc.go | 141 + .../vendor/github.com/libvirt/libvirt-go/domain.go | 4766 ++++++++++++++++++++ .../github.com/libvirt/libvirt-go/domain_compat.h | 914 ++++ .../github.com/libvirt/libvirt-go/domain_events.go | 1633 +++++++ .../libvirt/libvirt-go/domain_events_wrapper.go 
| 259 ++ .../libvirt/libvirt-go/domain_events_wrapper.h | 207 + .../libvirt/libvirt-go/domain_snapshot.go | 240 + .../libvirt/libvirt-go/domain_snapshot_wrapper.go | 215 + .../libvirt/libvirt-go/domain_snapshot_wrapper.h | 102 + .../libvirt/libvirt-go/domain_wrapper.go | 2330 ++++++++++ .../github.com/libvirt/libvirt-go/domain_wrapper.h | 986 ++++ .../vendor/github.com/libvirt/libvirt-go/error.go | 604 +++ .../github.com/libvirt/libvirt-go/error_compat.h | 166 + .../vendor/github.com/libvirt/libvirt-go/events.go | 258 ++ .../libvirt/libvirt-go/events_wrapper.go | 193 + .../github.com/libvirt/libvirt-go/events_wrapper.h | 82 + .../github.com/libvirt/libvirt-go/interface.go | 145 + .../libvirt/libvirt-go/interface_wrapper.go | 158 + .../libvirt/libvirt-go/interface_wrapper.h | 76 + .../github.com/libvirt/libvirt-go/libvirtd.conf | 6 + .../github.com/libvirt/libvirt-go/libvirtd.sasl | 2 + .../vendor/github.com/libvirt/libvirt-go/lxc.go | 155 + .../github.com/libvirt/libvirt-go/lxc_wrapper.go | 101 + .../github.com/libvirt/libvirt-go/lxc_wrapper.h | 63 + .../github.com/libvirt/libvirt-go/network.go | 335 ++ .../github.com/libvirt/libvirt-go/network_compat.h | 86 + .../libvirt/libvirt-go/network_events.go | 125 + .../libvirt/libvirt-go/network_events_wrapper.go | 79 + .../libvirt/libvirt-go/network_events_wrapper.h | 55 + .../libvirt/libvirt-go/network_wrapper.go | 267 ++ .../libvirt/libvirt-go/network_wrapper.h | 119 + .../github.com/libvirt/libvirt-go/node_device.go | 192 + .../libvirt/libvirt-go/node_device_compat.h | 55 + .../libvirt/libvirt-go/node_device_events.go | 156 + .../libvirt-go/node_device_events_wrapper.go | 88 + .../libvirt-go/node_device_events_wrapper.h | 60 + .../libvirt/libvirt-go/node_device_wrapper.go | 184 + .../libvirt/libvirt-go/node_device_wrapper.h | 88 + .../github.com/libvirt/libvirt-go/nwfilter.go | 118 + .../libvirt/libvirt-go/nwfilter_binding.go | 125 + .../libvirt/libvirt-go/nwfilter_binding_compat.h | 33 + .../libvirt/libvirt-go/nwfilter_binding_wrapper.go | 132 + .../libvirt/libvirt-go/nwfilter_binding_wrapper.h | 60 + .../libvirt/libvirt-go/nwfilter_wrapper.go | 122 + .../libvirt/libvirt-go/nwfilter_wrapper.h | 65 + .../vendor/github.com/libvirt/libvirt-go/qemu.go | 189 + .../github.com/libvirt/libvirt-go/qemu_compat.h | 57 + .../github.com/libvirt/libvirt-go/qemu_wrapper.go | 133 + .../github.com/libvirt/libvirt-go/qemu_wrapper.h | 78 + .../vendor/github.com/libvirt/libvirt-go/secret.go | 184 + .../github.com/libvirt/libvirt-go/secret_compat.h | 61 + .../github.com/libvirt/libvirt-go/secret_events.go | 159 + .../libvirt/libvirt-go/secret_events_wrapper.go | 88 + .../libvirt/libvirt-go/secret_events_wrapper.h | 58 + .../libvirt/libvirt-go/secret_wrapper.go | 175 + .../github.com/libvirt/libvirt-go/secret_wrapper.h | 86 + .../github.com/libvirt/libvirt-go/storage_pool.go | 394 ++ .../libvirt/libvirt-go/storage_pool_compat.h | 92 + .../libvirt/libvirt-go/storage_pool_events.go | 174 + .../libvirt-go/storage_pool_events_wrapper.go | 89 + .../libvirt-go/storage_pool_events_wrapper.h | 59 + .../libvirt/libvirt-go/storage_pool_wrapper.go | 343 ++ .../libvirt/libvirt-go/storage_pool_wrapper.h | 150 + .../libvirt/libvirt-go/storage_volume.go | 290 ++ .../libvirt/libvirt-go/storage_volume_compat.h | 78 + .../libvirt/libvirt-go/storage_volume_wrapper.go | 249 + .../libvirt/libvirt-go/storage_volume_wrapper.h | 116 + .../vendor/github.com/libvirt/libvirt-go/stream.go | 418 ++ .../github.com/libvirt/libvirt-go/stream_compat.h | 36 + 
.../libvirt/libvirt-go/stream_wrapper.go | 331 ++ .../github.com/libvirt/libvirt-go/stream_wrapper.h | 120 + .../github.com/libvirt/libvirt-go/typedparams.go | 262 ++ .../github.com/mattn/go-colorable/.travis.yml | 9 + .../vendor/github.com/mattn/go-colorable/LICENSE | 21 + .../vendor/github.com/mattn/go-colorable/README.md | 48 + .../mattn/go-colorable/colorable_appengine.go | 29 + .../mattn/go-colorable/colorable_others.go | 30 + .../mattn/go-colorable/colorable_windows.go | 884 ++++ .../github.com/mattn/go-colorable/noncolorable.go | 55 + .../vendor/github.com/mattn/go-isatty/.travis.yml | 9 + src/dma/vendor/github.com/mattn/go-isatty/LICENSE | 9 + .../vendor/github.com/mattn/go-isatty/README.md | 50 + src/dma/vendor/github.com/mattn/go-isatty/doc.go | 2 + .../github.com/mattn/go-isatty/isatty_appengine.go | 15 + .../github.com/mattn/go-isatty/isatty_bsd.go | 18 + .../github.com/mattn/go-isatty/isatty_linux.go | 18 + .../mattn/go-isatty/isatty_linux_ppc64x.go | 19 + .../github.com/mattn/go-isatty/isatty_others.go | 10 + .../github.com/mattn/go-isatty/isatty_solaris.go | 16 + .../github.com/mattn/go-isatty/isatty_windows.go | 94 + .../vendor/github.com/streadway/amqp/.gitignore | 4 + .../vendor/github.com/streadway/amqp/.travis.yml | 18 + .../github.com/streadway/amqp/CONTRIBUTING.md | 35 + src/dma/vendor/github.com/streadway/amqp/LICENSE | 23 + src/dma/vendor/github.com/streadway/amqp/README.md | 93 + .../vendor/github.com/streadway/amqp/allocator.go | 106 + src/dma/vendor/github.com/streadway/amqp/auth.go | 46 + src/dma/vendor/github.com/streadway/amqp/certs.sh | 159 + .../vendor/github.com/streadway/amqp/channel.go | 1587 +++++++ .../vendor/github.com/streadway/amqp/confirms.go | 94 + .../vendor/github.com/streadway/amqp/connection.go | 842 ++++ .../vendor/github.com/streadway/amqp/consumers.go | 142 + .../vendor/github.com/streadway/amqp/delivery.go | 173 + src/dma/vendor/github.com/streadway/amqp/doc.go | 108 + src/dma/vendor/github.com/streadway/amqp/fuzz.go | 17 + src/dma/vendor/github.com/streadway/amqp/gen.sh | 2 + .../vendor/github.com/streadway/amqp/pre-commit | 29 + src/dma/vendor/github.com/streadway/amqp/read.go | 456 ++ src/dma/vendor/github.com/streadway/amqp/return.go | 64 + .../vendor/github.com/streadway/amqp/spec091.go | 3306 ++++++++++++++ src/dma/vendor/github.com/streadway/amqp/types.go | 427 ++ src/dma/vendor/github.com/streadway/amqp/uri.go | 167 + src/dma/vendor/github.com/streadway/amqp/write.go | 411 ++ .../github.com/valyala/bytebufferpool/.travis.yml | 15 + .../github.com/valyala/bytebufferpool/LICENSE | 22 + .../github.com/valyala/bytebufferpool/README.md | 21 + .../valyala/bytebufferpool/bytebuffer.go | 111 + .../github.com/valyala/bytebufferpool/doc.go | 7 + .../github.com/valyala/bytebufferpool/pool.go | 151 + .../vendor/github.com/valyala/fasttemplate/LICENSE | 22 + .../github.com/valyala/fasttemplate/README.md | 85 + .../github.com/valyala/fasttemplate/template.go | 317 ++ .../github.com/valyala/fasttemplate/unsafe.go | 22 + .../github.com/valyala/fasttemplate/unsafe_gae.go | 11 + src/dma/vendor/golang.org/x/crypto/AUTHORS | 3 + src/dma/vendor/golang.org/x/crypto/CONTRIBUTORS | 3 + src/dma/vendor/golang.org/x/crypto/LICENSE | 27 + src/dma/vendor/golang.org/x/crypto/PATENTS | 22 + src/dma/vendor/golang.org/x/crypto/acme/acme.go | 922 ++++ .../golang.org/x/crypto/acme/autocert/autocert.go | 1139 +++++ .../golang.org/x/crypto/acme/autocert/cache.go | 130 + .../golang.org/x/crypto/acme/autocert/listener.go | 157 + 
.../golang.org/x/crypto/acme/autocert/renewal.go | 141 + src/dma/vendor/golang.org/x/crypto/acme/http.go | 281 ++ src/dma/vendor/golang.org/x/crypto/acme/jws.go | 153 + src/dma/vendor/golang.org/x/crypto/acme/types.go | 329 ++ 258 files changed, 60007 insertions(+) create mode 100644 src/dma/Gopkg.lock create mode 100644 src/dma/Gopkg.toml create mode 100644 src/dma/cmd/infofetch/daemon.go create mode 100644 src/dma/cmd/infofetch/openstack.go create mode 100644 src/dma/cmd/infofetch/virsh_domain.go create mode 100644 src/dma/cmd/server/agent.go create mode 100644 src/dma/cmd/server/amqp.go create mode 100644 src/dma/cmd/server/api.go create mode 100644 src/dma/cmd/server/main.go create mode 100644 src/dma/cmd/threshold/evaluate.go create mode 100644 src/dma/cmd/threshold/main.go create mode 100644 src/dma/cmd/threshold/read.go create mode 100644 src/dma/cmd/threshold/transmit.go create mode 100644 src/dma/examples/config.toml create mode 100644 src/dma/pkg/common/redispool.go create mode 100644 src/dma/pkg/common/types.go create mode 100644 src/dma/sampleconf/demo-memory-linearreg/analysis.conf create mode 100644 src/dma/sampleconf/demo-memory-linearreg/evaluator.conf create mode 100644 src/dma/sampleconf/demo-memory-linearreg/read-metrics.conf create mode 100644 src/dma/sampleconf/demo-memory-linearreg/transmitter.conf create mode 100644 src/dma/sampleconf/demo-memory-linearreg/write-redis.conf create mode 100644 src/dma/sampleconf/simple-threshold/evaluator.conf create mode 100644 src/dma/sampleconf/simple-threshold/read-metrics.conf create mode 100644 src/dma/sampleconf/simple-threshold/transmitter.conf create mode 100644 src/dma/sampleconf/simple-threshold/write-redis.conf create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/.gitignore create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/.travis.yml create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/COMPATIBLE create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/COPYING create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/Makefile create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/README.md create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/decode.go create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/decode_meta.go create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/doc.go create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/encode.go create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/encoding_types.go create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/lex.go create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/parse.go create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/session.vim create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/type_check.go create mode 100644 src/dma/vendor/github.com/BurntSushi/toml/type_fields.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/.gitignore create mode 100644 src/dma/vendor/github.com/go-redis/redis/.travis.yml create mode 100644 src/dma/vendor/github.com/go-redis/redis/CHANGELOG.md create mode 100644 src/dma/vendor/github.com/go-redis/redis/LICENSE create mode 100644 
src/dma/vendor/github.com/go-redis/redis/Makefile create mode 100644 src/dma/vendor/github.com/go-redis/redis/README.md create mode 100644 src/dma/vendor/github.com/go-redis/redis/cluster.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/cluster_commands.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/command.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/commands.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/doc.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/error.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/internal.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/log.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/once.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/pool/conn.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/pool/pool.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/pool/pool_single.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/proto/reader.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/proto/scan.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/proto/write_buffer.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/singleflight/singleflight.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/util.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/util/safe.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/util/strconv.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/internal/util/unsafe.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/iterator.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/options.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/parser.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/pipeline.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/pubsub.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/redis.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/result.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/ring.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/script.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/sentinel.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/tx.go create mode 100644 src/dma/vendor/github.com/go-redis/redis/universal.go create mode 100644 src/dma/vendor/github.com/labstack/echo/.editorconfig create mode 100644 src/dma/vendor/github.com/labstack/echo/.gitattributes create mode 100644 src/dma/vendor/github.com/labstack/echo/.gitignore create mode 100644 src/dma/vendor/github.com/labstack/echo/.travis.yml create mode 100644 src/dma/vendor/github.com/labstack/echo/Gopkg.lock create mode 100644 src/dma/vendor/github.com/labstack/echo/Gopkg.toml create mode 100644 src/dma/vendor/github.com/labstack/echo/LICENSE create mode 100644 src/dma/vendor/github.com/labstack/echo/Makefile create mode 100644 src/dma/vendor/github.com/labstack/echo/README.md create mode 100644 src/dma/vendor/github.com/labstack/echo/bind.go 
create mode 100644 src/dma/vendor/github.com/labstack/echo/context.go create mode 100644 src/dma/vendor/github.com/labstack/echo/echo.go create mode 100644 src/dma/vendor/github.com/labstack/echo/group.go create mode 100644 src/dma/vendor/github.com/labstack/echo/log.go create mode 100644 src/dma/vendor/github.com/labstack/echo/response.go create mode 100644 src/dma/vendor/github.com/labstack/echo/router.go create mode 100644 src/dma/vendor/github.com/labstack/gommon/LICENSE create mode 100644 src/dma/vendor/github.com/labstack/gommon/color/README.md create mode 100644 src/dma/vendor/github.com/labstack/gommon/color/color.go create mode 100644 src/dma/vendor/github.com/labstack/gommon/log/README.md create mode 100644 src/dma/vendor/github.com/labstack/gommon/log/color.go create mode 100644 src/dma/vendor/github.com/labstack/gommon/log/log.go create mode 100644 src/dma/vendor/github.com/labstack/gommon/log/white.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/.gitignore create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/.gitpublish create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/.travis.yml create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/FAQ.md create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/LICENSE create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/README.md create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/callbacks.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/callbacks_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/callbacks_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/connect.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/connect_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/connect_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/connect_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/doc.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/domain.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/domain_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/domain_events.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/domain_events_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/domain_events_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/domain_snapshot.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/domain_snapshot_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/domain_snapshot_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/domain_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/domain_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/error.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/error_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/events.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/events_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/events_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/interface.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/interface_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/interface_wrapper.h create mode 100644 
src/dma/vendor/github.com/libvirt/libvirt-go/libvirtd.conf create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/libvirtd.sasl create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/lxc.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/lxc_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/lxc_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/network.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/network_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/network_events.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/network_events_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/network_events_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/network_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/network_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/node_device.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/node_device_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/node_device_events.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/node_device_events_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/node_device_events_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/node_device_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/node_device_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/nwfilter.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/nwfilter_binding.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/nwfilter_binding_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/nwfilter_binding_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/nwfilter_binding_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/nwfilter_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/nwfilter_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/qemu.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/qemu_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/qemu_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/qemu_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/secret.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/secret_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/secret_events.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/secret_events_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/secret_events_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/secret_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/secret_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/storage_pool.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/storage_pool_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/storage_pool_events.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/storage_pool_events_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/storage_pool_events_wrapper.h create mode 100644 
src/dma/vendor/github.com/libvirt/libvirt-go/storage_pool_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/storage_pool_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/storage_volume.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/storage_volume_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/storage_volume_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/storage_volume_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/stream.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/stream_compat.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/stream_wrapper.go create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/stream_wrapper.h create mode 100644 src/dma/vendor/github.com/libvirt/libvirt-go/typedparams.go create mode 100644 src/dma/vendor/github.com/mattn/go-colorable/.travis.yml create mode 100644 src/dma/vendor/github.com/mattn/go-colorable/LICENSE create mode 100644 src/dma/vendor/github.com/mattn/go-colorable/README.md create mode 100644 src/dma/vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 src/dma/vendor/github.com/mattn/go-colorable/colorable_others.go create mode 100644 src/dma/vendor/github.com/mattn/go-colorable/colorable_windows.go create mode 100644 src/dma/vendor/github.com/mattn/go-colorable/noncolorable.go create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/.travis.yml create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/README.md create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/doc.go create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/isatty_appengine.go create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/isatty_linux.go create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 src/dma/vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/.gitignore create mode 100644 src/dma/vendor/github.com/streadway/amqp/.travis.yml create mode 100644 src/dma/vendor/github.com/streadway/amqp/CONTRIBUTING.md create mode 100644 src/dma/vendor/github.com/streadway/amqp/LICENSE create mode 100644 src/dma/vendor/github.com/streadway/amqp/README.md create mode 100644 src/dma/vendor/github.com/streadway/amqp/allocator.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/auth.go create mode 100755 src/dma/vendor/github.com/streadway/amqp/certs.sh create mode 100644 src/dma/vendor/github.com/streadway/amqp/channel.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/confirms.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/connection.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/consumers.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/delivery.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/doc.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/fuzz.go create mode 100755 src/dma/vendor/github.com/streadway/amqp/gen.sh create mode 100755 src/dma/vendor/github.com/streadway/amqp/pre-commit create mode 100644 
src/dma/vendor/github.com/streadway/amqp/read.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/return.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/spec091.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/types.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/uri.go create mode 100644 src/dma/vendor/github.com/streadway/amqp/write.go create mode 100644 src/dma/vendor/github.com/valyala/bytebufferpool/.travis.yml create mode 100644 src/dma/vendor/github.com/valyala/bytebufferpool/LICENSE create mode 100644 src/dma/vendor/github.com/valyala/bytebufferpool/README.md create mode 100644 src/dma/vendor/github.com/valyala/bytebufferpool/bytebuffer.go create mode 100644 src/dma/vendor/github.com/valyala/bytebufferpool/doc.go create mode 100644 src/dma/vendor/github.com/valyala/bytebufferpool/pool.go create mode 100644 src/dma/vendor/github.com/valyala/fasttemplate/LICENSE create mode 100644 src/dma/vendor/github.com/valyala/fasttemplate/README.md create mode 100644 src/dma/vendor/github.com/valyala/fasttemplate/template.go create mode 100644 src/dma/vendor/github.com/valyala/fasttemplate/unsafe.go create mode 100644 src/dma/vendor/github.com/valyala/fasttemplate/unsafe_gae.go create mode 100644 src/dma/vendor/golang.org/x/crypto/AUTHORS create mode 100644 src/dma/vendor/golang.org/x/crypto/CONTRIBUTORS create mode 100644 src/dma/vendor/golang.org/x/crypto/LICENSE create mode 100644 src/dma/vendor/golang.org/x/crypto/PATENTS create mode 100644 src/dma/vendor/golang.org/x/crypto/acme/acme.go create mode 100644 src/dma/vendor/golang.org/x/crypto/acme/autocert/autocert.go create mode 100644 src/dma/vendor/golang.org/x/crypto/acme/autocert/cache.go create mode 100644 src/dma/vendor/golang.org/x/crypto/acme/autocert/listener.go create mode 100644 src/dma/vendor/golang.org/x/crypto/acme/autocert/renewal.go create mode 100644 src/dma/vendor/golang.org/x/crypto/acme/http.go create mode 100644 src/dma/vendor/golang.org/x/crypto/acme/jws.go create mode 100644 src/dma/vendor/golang.org/x/crypto/acme/types.go (limited to 'src') diff --git a/src/dma/Gopkg.lock b/src/dma/Gopkg.lock new file mode 100644 index 00000000..d76c3d8e --- /dev/null +++ b/src/dma/Gopkg.lock @@ -0,0 +1,133 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:b16fbfbcc20645cb419f78325bb2e85ec729b338e996a228124d68931a6f2a37" + name = "github.com/BurntSushi/toml" + packages = ["."] + pruneopts = "UT" + revision = "b26d9c308763d68093482582cea63d69be07a0f0" + version = "v0.3.0" + +[[projects]] + digest = "1:991bb96360eb8db70a40e9c496769fe6a7bbac4047f8376494d9837397078327" + name = "github.com/go-redis/redis" + packages = [ + ".", + "internal", + "internal/consistenthash", + "internal/hashtag", + "internal/pool", + "internal/proto", + "internal/singleflight", + "internal/util", + ] + pruneopts = "UT" + revision = "480db94d33e6088e08d628833b6c0705451d24bb" + version = "v6.13.2" + +[[projects]] + digest = "1:26971a24734a1b911111711f981ffc7463361b36483456f33da623570b7d214c" + name = "github.com/labstack/echo" + packages = ["."] + pruneopts = "UT" + revision = "6d227dfea4d2e52cb76856120b3c17f758139b4e" + version = "3.3.5" + +[[projects]] + digest = "1:faee5b9f53eb1ae4eb04708c040c8c4dd685ce46509671e57a08520a15c54368" + name = "github.com/labstack/gommon" + packages = [ + "color", + "log", + ] + pruneopts = "UT" + revision = "d6898124de917583f5ff5592ef931d1dfe0ddc05" + version = "0.2.6" + +[[projects]] + digest = "1:aed4a1a07381be8758c5cf680977b9d2b83bd319beff4bd311001ef8ba3856b1" + name = "github.com/libvirt/libvirt-go" + packages = ["."] + pruneopts = "UT" + revision = "d5e0adac44d4365a2d97e259f51ab76cc3c01b20" + version = "v4.6.0" + +[[projects]] + digest = "1:c658e84ad3916da105a761660dcaeb01e63416c8ec7bc62256a9b411a05fcd67" + name = "github.com/mattn/go-colorable" + packages = ["."] + pruneopts = "UT" + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + digest = "1:d4d17353dbd05cb52a2a52b7fe1771883b682806f68db442b436294926bbfafb" + name = "github.com/mattn/go-isatty" + packages = ["."] + pruneopts = "UT" + revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" + version = "v0.0.3" + +[[projects]] + branch = "master" + digest = "1:0c088044c1c1ee83e723692442df4fec9b3f2e57f7a99773485c97adfbc31695" + name = "github.com/streadway/amqp" + packages = ["."] + pruneopts = "UT" + revision = "70e15c650864f4fc47f5d3c82ea117285480895d" + +[[projects]] + branch = "master" + digest = "1:c468422f334a6b46a19448ad59aaffdfc0a36b08fdcc1c749a0b29b6453d7e59" + name = "github.com/valyala/bytebufferpool" + packages = ["."] + pruneopts = "UT" + revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7" + +[[projects]] + branch = "master" + digest = "1:268b8bce0064e8c057d7b913605459f9a26dcab864c0886a56d196540fbf003f" + name = "github.com/valyala/fasttemplate" + packages = ["."] + pruneopts = "UT" + revision = "dcecefd839c4193db0d35b88ec65b4c12d360ab0" + +[[projects]] + branch = "master" + digest = "1:dedf20eb0d3e8d6aa8a4d3d2fae248222b688ed528201995e152cc497899123c" + name = "golang.org/x/crypto" + packages = [ + "acme", + "acme/autocert", + ] + pruneopts = "UT" + revision = "aabede6cba87e37f413b3e60ebfc214f8eeca1b0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/BurntSushi/toml", + "github.com/go-redis/redis", + "github.com/go-redis/redis/internal", + "github.com/go-redis/redis/internal/consistenthash", + "github.com/go-redis/redis/internal/hashtag", + "github.com/go-redis/redis/internal/pool", + "github.com/go-redis/redis/internal/proto", + "github.com/go-redis/redis/internal/singleflight", + "github.com/go-redis/redis/internal/util", + "github.com/labstack/echo", + "github.com/labstack/gommon/color", + "github.com/labstack/gommon/log", + 
"github.com/libvirt/libvirt-go", + "github.com/mattn/go-colorable", + "github.com/mattn/go-isatty", + "github.com/streadway/amqp", + "github.com/valyala/bytebufferpool", + "github.com/valyala/fasttemplate", + "golang.org/x/crypto/acme", + "golang.org/x/crypto/acme/autocert", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/src/dma/Gopkg.toml b/src/dma/Gopkg.toml new file mode 100644 index 00000000..3d26fcc6 --- /dev/null +++ b/src/dma/Gopkg.toml @@ -0,0 +1,51 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +ignored = ["golang.org/x/sys/unix"] + +[[constraint]] + name = "github.com/BurntSushi/toml" + version = "0.3.0" + +[[constraint]] + name = "github.com/go-redis/redis" + version = "6.13.2" + +[[constraint]] + name = "github.com/labstack/echo" + version = "3.3.5" + +[[constraint]] + name = "github.com/libvirt/libvirt-go" + version = "4.6.0" + +[[constraint]] + branch = "master" + name = "github.com/streadway/amqp" + +[prune] + go-tests = true + unused-packages = true diff --git a/src/dma/cmd/infofetch/daemon.go b/src/dma/cmd/infofetch/daemon.go new file mode 100644 index 00000000..d4ff94f5 --- /dev/null +++ b/src/dma/cmd/infofetch/daemon.go @@ -0,0 +1,103 @@ +/* + * Copyright 2018 NEC Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "github.com/BurntSushi/toml" + "github.com/distributed-monitoring/agent/pkg/common" + "github.com/go-redis/redis" + libvirt "github.com/libvirt/libvirt-go" + "log" + "sync" +) + +var infoPool common.RedisPool + +// Config is ... +type Config struct { + Common CommonConfig + InfoFetch InfoFetchConfig +} + +// CommonConfig is ... +type CommonConfig struct { + RedisHost string `toml:"redis_host"` + RedisPort string `toml:"redis_port"` + RedisPassword string `toml:"redis_password"` + RedisDB int `toml:"redis_db"` +} + +// InfoFetchConfig is ... 
+type InfoFetchConfig struct { + OSUsername string `toml:"os_username"` + OSUserDomainName string `toml:"os_user_domain_name"` + OSProjectDomainName string `toml:"os_project_domain_name"` + OSProjectName string `toml:"os_project_name"` + OSPassword string `toml:"os_password"` + OSAuthURL string `toml:"os_auth_url"` +} + +func main() { + + var config Config + _, err := toml.DecodeFile("/etc/barometer-dma/config.toml", &config) + if err != nil { + log.Println("Read error of config file") + } + + var waitgroup sync.WaitGroup + libvirt.EventRegisterDefaultImpl() + + redisClient := redis.NewClient(&redis.Options{ + Addr: config.Common.RedisHost + ":" + config.Common.RedisPort, + Password: config.Common.RedisPassword, + DB: config.Common.RedisDB, + }) + infoPool = common.RedisPool{Client: redisClient} + // Initialize redis db... + infoPool.DelAll() + + conn, err := libvirt.NewConnect("qemu:///system") + if err != nil { + log.Fatalln("libvirt connect error") + } + defer conn.Close() + + vmIfInfoChan := make(chan string) + { + ctx := context.Background() + waitgroup.Add(1) + go func() { + RunNeutronInfoFetch(ctx, &config, vmIfInfoChan) + waitgroup.Done() + }() + } + + //Get active VM info + GetActiveDomain(conn, vmIfInfoChan) + { + ctx := context.Background() + waitgroup.Add(1) + go func() { + RunVirshEventLoop(ctx, conn, vmIfInfoChan) + waitgroup.Done() + }() + } + + waitgroup.Wait() +} diff --git a/src/dma/cmd/infofetch/openstack.go b/src/dma/cmd/infofetch/openstack.go new file mode 100644 index 00000000..c0c54f5a --- /dev/null +++ b/src/dma/cmd/infofetch/openstack.go @@ -0,0 +1,522 @@ +/* + * Copyright 2017 Red Hat + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
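main() above wires the decoded config into a go-redis client, wraps it in common.RedisPool, and hands the same pool to both fetch loops. The RedisPool helper itself is added in src/dma/pkg/common/redispool.go and is not shown in this excerpt; the sketch below shows the plain go-redis v6 calls such a wrapper sits on top of, using the vm/<domain>/vminfo key layout that appears later in virsh_domain.go. The address and payload are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/go-redis/redis"
)

func main() {
	// Addr and DB mirror the redis_host, redis_port and redis_db settings read in daemon.go.
	client := redis.NewClient(&redis.Options{
		Addr: "127.0.0.1:6379",
		DB:   0,
	})

	// Key naming taken from the patch: vm/<domain>/vminfo holds the JSON-encoded VM annotation.
	if err := client.Set("vm/instance-00000001/vminfo", `{"Name":"demo"}`, 0).Err(); err != nil {
		log.Fatalf("redis set failed: %v", err)
	}
	val, err := client.Get("vm/instance-00000001/vminfo").Result()
	if err != nil {
		log.Fatalf("redis get failed: %v", err)
	}
	fmt.Println(val)
}
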
+ */ + +package main + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "strings" + "text/template" + "time" +) + +var env *InfoFetchConfig + +type userInfo struct { + UserDomainName string + UserName string + Password string + ProjectDomainName string + ProjectName string +} + +var tokenJSONTemplate = `{ + "auth": { + "identity": { + "methods": [ + "password" + ], + "password": { + "user": { + "domain": { + "name": "{{.UserDomainName}}" + }, + "name": "{{.UserName}}", + "password": "{{.Password}}" + } + } + }, + "scope": { + "project": { + "domain": { + "name": "{{.ProjectDomainName}}" + }, + "name": "{{.ProjectName}}" + } + } + } +} +` + +type tokenReply struct { + Token struct { + IsDomain bool `json:"is_domain"` + Methods []string `json:"methods"` + Roles []struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"roles"` + ExpiresAt time.Time `json:"expires_at"` + Project struct { + Domain struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` + } `json:"project"` + User struct { + PasswordExpiresAt interface{} `json:"password_expires_at"` + Domain struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` + } `json:"user"` + AuditIds []string `json:"audit_ids"` + IssuedAt time.Time `json:"issued_at"` + } `json:"token"` +} + +type token struct { + Token string + ExpiresAt time.Time +} + +func (t *token) CheckToken() { + now := time.Now() + + if t.ExpiresAt.Sub(now).Seconds() < 30 { + newToken, _ := getToken() + t.Token = newToken.Token + t.ExpiresAt = newToken.ExpiresAt + } +} + +func getToken() (*token, error) { + var buf bytes.Buffer + + t := template.Must(template.New("json template1").Parse(tokenJSONTemplate)) + p := userInfo{ + UserDomainName: env.OSUserDomainName, + UserName: env.OSUsername, + Password: env.OSPassword, + ProjectDomainName: env.OSProjectDomainName, + ProjectName: env.OSProjectName, + } + t.Execute(&buf, p) + + body := bytes.NewReader(buf.Bytes()) + req, err := http.NewRequest("POST", env.OSAuthURL+"/auth/tokens?nocatalog", body) + if err != nil { + return &token{"", time.Unix(0, 0)}, fmt.Errorf("http request failed: %v", err) + } + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return &token{"", time.Unix(0, 0)}, fmt.Errorf("http POST failed: %v", err) + } + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + + tokenStr, ok := resp.Header["X-Subject-Token"] + if ok != true && len(tokenStr) != 1 { + return &token{"", time.Unix(0, 0)}, fmt.Errorf("no token in openstack reply") + } + + var repl tokenReply + err = json.Unmarshal(b, &repl) + + return &token{tokenStr[0], repl.Token.ExpiresAt}, nil +} + +type service struct { + Description string `json:"description"` + Links struct { + Self string `json:"self"` + } `json:"links"` + Enabled bool `json:"enabled"` + Type string `json:"type"` + ID string `json:"id"` + Name string `json:"name"` +} + +type serviceListReply struct { + Services []service `json:"services"` +} + +func (s *serviceListReply) GetService(name string) (*service, error) { + for _, v := range s.Services { + if v.Name == name { + return &v, nil + } + } + return nil, fmt.Errorf("no service id (%s) found", name) +} + +type endPoint struct { + RegionID string `json:"region_id"` + Links struct { + Self string `json:"self"` + } `json:"links"` + URL 
string `json:"url"` + Region string `json:"region"` + Enabled bool `json:"enabled"` + Interface string `json:"interface"` + ServiceID string `json:"service_id"` + ID string `json:"id"` +} + +type endPointReply struct { + Endpoints []endPoint `json:"endpoints"` +} + +func (e *endPointReply) GetEndpoint(serviceid string, ifname string) (*endPoint, error) { + for _, v := range e.Endpoints { + if v.Interface == ifname && v.ServiceID == serviceid { + return &v, nil + } + } + return nil, fmt.Errorf("no endpoint found (%s/%s)", serviceid, ifname) +} + +func getEndpoints(token *token) (endPointReply, error) { + token.CheckToken() + req, err := http.NewRequest("GET", env.OSAuthURL+"/endpoints", nil) + if err != nil { + return endPointReply{}, fmt.Errorf("request failed:%v", err) + } + req.Header.Set("X-Auth-Token", token.Token) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return endPointReply{}, fmt.Errorf("http GET failed:%v", err) + } + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + //fmt.Printf("%s", string(b)) + + var repl endPointReply + err = json.Unmarshal(b, &repl) + if err != nil { + return endPointReply{}, fmt.Errorf("http reply decoding failed:%v", err) + } + //fmt.Printf("%v", repl) + return repl, nil +} + +func getServiceList(token *token) (serviceListReply, error) { + token.CheckToken() + req, err := http.NewRequest("GET", env.OSAuthURL+"/services", nil) + if err != nil { + return serviceListReply{}, fmt.Errorf("request failed:%v", err) + } + req.Header.Set("X-Auth-Token", token.Token) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return serviceListReply{}, fmt.Errorf("http GET failed:%v", err) + } + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + + var repl serviceListReply + err = json.Unmarshal(b, &repl) + if err != nil { + return serviceListReply{}, fmt.Errorf("http reply decoding failed:%v", err) + } + return repl, nil +} + +type neutronPort struct { + AllowedAddressPairs []interface{} `json:"allowed_address_pairs"` + ExtraDhcpOpts []interface{} `json:"extra_dhcp_opts"` + UpdatedAt time.Time `json:"updated_at"` + DeviceOwner string `json:"device_owner"` + RevisionNumber int `json:"revision_number"` + PortSecurityEnabled bool `json:"port_security_enabled"` + BindingProfile struct { + } `json:"binding:profile"` + FixedIps []struct { + SubnetID string `json:"subnet_id"` + IPAddress string `json:"ip_address"` + } `json:"fixed_ips"` + ID string `json:"id"` + SecurityGroups []interface{} `json:"security_groups"` + BindingVifDetails struct { + PortFilter bool `json:"port_filter"` + DatapathType string `json:"datapath_type"` + OvsHybridPlug bool `json:"ovs_hybrid_plug"` + } `json:"binding:vif_details"` + BindingVifType string `json:"binding:vif_type"` + MacAddress string `json:"mac_address"` + ProjectID string `json:"project_id"` + Status string `json:"status"` + BindingHostID string `json:"binding:host_id"` + Description string `json:"description"` + Tags []interface{} `json:"tags"` + QosPolicyID interface{} `json:"qos_policy_id"` + Name string `json:"name"` + AdminStateUp bool `json:"admin_state_up"` + NetworkID string `json:"network_id"` + TenantID string `json:"tenant_id"` + CreatedAt time.Time `json:"created_at"` + BindingVnicType string `json:"binding:vnic_type"` + DeviceID string `json:"device_id"` +} + +type neutronPortReply struct { + Ports []neutronPort `json:"ports"` +} + +func getNeutronPorts(token *token, endpoint string) (neutronPortReply, error) { + token.CheckToken() + req, err := 
http.NewRequest("GET", endpoint+"/v2.0/ports", nil) + if err != nil { + return neutronPortReply{}, fmt.Errorf("request failed:%v", err) + } + req.Header.Set("X-Auth-Token", token.Token) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return neutronPortReply{}, fmt.Errorf("http GET failed:%v", err) + } + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + + var repl neutronPortReply + err = json.Unmarshal(b, &repl) + if err != nil { + return neutronPortReply{}, fmt.Errorf("http reply decoding failed:%v", err) + } + return repl, nil +} + +func (n *neutronPortReply) GetNeutronPortfromMAC(mac string) (*neutronPort, + error) { + for _, v := range n.Ports { + if v.MacAddress == strings.ToLower(mac) { + return &v, nil + } + } + return nil, fmt.Errorf("no port (%s) found", mac) +} + +type neutronNetwork struct { + ProviderPhysicalNetwork string `json:"provider:physical_network"` + Ipv6AddressScope interface{} `json:"ipv6_address_scope"` + RevisionNumber int `json:"revision_number"` + PortSecurityEnabled bool `json:"port_security_enabled"` + Mtu int `json:"mtu"` + ID string `json:"id"` + RouterExternal bool `json:"router:external"` + AvailabilityZoneHints []interface{} `json:"availability_zone_hints"` + AvailabilityZones []string `json:"availability_zones"` + ProviderSegmentationID interface{} `json:"provider:segmentation_id"` + Ipv4AddressScope interface{} `json:"ipv4_address_scope"` + Shared bool `json:"shared"` + ProjectID string `json:"project_id"` + Status string `json:"status"` + Subnets []string `json:"subnets"` + Description string `json:"description"` + Tags []interface{} `json:"tags"` + UpdatedAt time.Time `json:"updated_at"` + IsDefault bool `json:"is_default"` + QosPolicyID interface{} `json:"qos_policy_id"` + Name string `json:"name"` + AdminStateUp bool `json:"admin_state_up"` + TenantID string `json:"tenant_id"` + CreatedAt time.Time `json:"created_at"` + ProviderNetworkType string `json:"provider:network_type"` +} + +type neutronNetworkReply struct { + Networks []neutronNetwork `json:"networks"` +} + +func (n *neutronNetworkReply) GetNetworkFromID(netid string) (*neutronNetwork, error) { + for _, v := range n.Networks { + if v.ID == netid { + return &v, nil + } + } + return nil, fmt.Errorf("no network (%s) found", netid) +} + +func getNetworkReply(token *token, endpoint string) (neutronNetworkReply, error) { + token.CheckToken() + req, err := http.NewRequest("GET", endpoint+"/v2.0/networks", nil) + if err != nil { + return neutronNetworkReply{}, fmt.Errorf("request failed:%v", err) + } + req.Header.Set("X-Auth-Token", token.Token) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return neutronNetworkReply{}, fmt.Errorf("http GET failed:%v", err) + } + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + + var repl neutronNetworkReply + err = json.Unmarshal(b, &repl) + if err != nil { + return neutronNetworkReply{}, fmt.Errorf("http reply decoding failed:%v", err) + } + return repl, nil +} + +type novaCompute struct { + ID string `json:"id"` + Links []struct { + Href string `json:"href"` + Rel string `json:"rel"` + } `json:"links"` + Name string `json:"name"` +} + +type novaComputeReply struct { + Servers []novaCompute `json:"servers"` +} + +func (n *novaComputeReply) GetComputeFromID(vmid string) (*novaCompute, error) { + for _, v := range n.Servers { + if v.ID == vmid { + return &v, nil + } + } + return nil, fmt.Errorf("no vm (%s) found", vmid) +} + +func getComputeReply(token *token, endpoint string) (novaComputeReply, 
error) { + token.CheckToken() + values := url.Values{} + values.Add("all_tenants", "1") + + req, err := http.NewRequest("GET", endpoint+"/servers", nil) + if err != nil { + return novaComputeReply{}, fmt.Errorf("request failed:%v", err) + } + req.Header.Set("X-Auth-Token", token.Token) + req.URL.RawQuery = values.Encode() + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return novaComputeReply{}, fmt.Errorf("http GET failed:%v", err) + } + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + + var repl novaComputeReply + err = json.Unmarshal(b, &repl) + if err != nil { + return novaComputeReply{}, fmt.Errorf("http reply decoding failed:%v", err) + } + + return repl, nil +} + +type osNeutronInterfaceAnnotation struct { + IfName string + VMName string + NetworkName string +} + +// RunNeutronInfoFetch gets redis key update from libvirt and get network information +// from Neutron with REST api. The retrieved information is stored under redis, +// if//neutron_network +func RunNeutronInfoFetch(ctx context.Context, config *Config, vmIfInfo chan string) error { + env = &config.InfoFetch + token, err := getToken() + + if err != nil { + fmt.Fprintf(os.Stderr, "cannot get token: %v", err) + return err + } + + svc, _ := getServiceList(token) + neuID, _ := svc.GetService("neutron") + //fmt.Printf("neutron id:%s\n", id.ID) + + novaID, _ := svc.GetService("nova") + //fmt.Printf("nova id:%s\n", id.ID) + + endpoints, _ := getEndpoints(token) + neuEndpoint, _ := endpoints.GetEndpoint(neuID.ID, "admin") + //fmt.Printf("neutron endpoint:%s\n", neuEndpoint.URL) + + novaEndpoint, _ := endpoints.GetEndpoint(novaID.ID, "admin") + //fmt.Printf("nova endpoint:%s\n", novaEndpoint.URL) + + getComputeReply(token, novaEndpoint.URL) + getNeutronPorts(token, neuEndpoint.URL) + //vmrepl, _ := getComputeReply(token, novaEndpoint.URL) + //prepl, _ := getNeutronPorts(token, neuEndpoint.URL) + +EVENTLOOP: + for { + select { + case <-ctx.Done(): + break EVENTLOOP + case key := <-vmIfInfo: + log.Printf("Incoming IF: %v", key) + libvirtIfInfo, err := infoPool.Get(key) + if err != nil { + log.Fatalf("Err: %v", err) + } else { + var ifInfo osVMInterfaceAnnotation + err = json.Unmarshal([]byte(libvirtIfInfo), &ifInfo) + if err != nil { + log.Fatalf("Err: %v", err) + } else { + vmrepl, _ := getComputeReply(token, novaEndpoint.URL) + prepl, _ := getNeutronPorts(token, neuEndpoint.URL) + nrepl, _ := getNetworkReply(token, neuEndpoint.URL) + netid, _ := prepl.GetNeutronPortfromMAC(ifInfo.MacAddr) + net, _ := nrepl.GetNetworkFromID(netid.NetworkID) + vm, _ := vmrepl.GetComputeFromID(netid.DeviceID) + osIfInfo := osNeutronInterfaceAnnotation{ + IfName: ifInfo.Target, + VMName: vm.Name, + NetworkName: net.Name} + + osIfInfoJSON, err := json.Marshal(osIfInfo) + if err != nil { + log.Fatalf("Err: %v", err) + } else { + log.Printf("Get: vmname: %s / networkname:%s", vm.Name, net.Name) + infoPool.Set(fmt.Sprintf("if/%s/%s", ifInfo.Target, "neutron_network"), string(osIfInfoJSON)) + } + } + } + } + } + return nil +} diff --git a/src/dma/cmd/infofetch/virsh_domain.go b/src/dma/cmd/infofetch/virsh_domain.go new file mode 100644 index 00000000..b79f5bdd --- /dev/null +++ b/src/dma/cmd/infofetch/virsh_domain.go @@ -0,0 +1,264 @@ +/* + * Copyright 2017 Red Hat + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
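openstack.go above boils down to a small Keystone v3 client: getToken() POSTs a password-scoped auth request, takes the token itself from the X-Subject-Token response header and its expiry from the JSON body, and every later Neutron/Nova GET resends that token in the X-Auth-Token header. The sketch below condenses just the token request; the URL, domain name, and credentials are placeholders, and the expiry/body parsing done by the real code is omitted.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// fetchToken requests a project-scoped token from Keystone v3.
// authURL corresponds to the os_auth_url setting, e.g. "http://keystone.example:5000/v3" (placeholder).
func fetchToken(authURL, user, password, project string) (string, error) {
	body := fmt.Sprintf(`{"auth":{"identity":{"methods":["password"],
		"password":{"user":{"domain":{"name":"Default"},"name":%q,"password":%q}}},
		"scope":{"project":{"domain":{"name":"Default"},"name":%q}}}}`,
		user, password, project)

	req, err := http.NewRequest("POST", authURL+"/auth/tokens?nocatalog", bytes.NewReader([]byte(body)))
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// Keystone returns the token in a response header, not in the JSON body.
	token := resp.Header.Get("X-Subject-Token")
	if token == "" {
		return "", fmt.Errorf("no X-Subject-Token in reply (status %s)", resp.Status)
	}
	return token, nil
}

func main() {
	token, err := fetchToken("http://keystone.example:5000/v3", "admin", "secret", "admin")
	if err != nil {
		fmt.Println("auth failed:", err)
		return
	}
	fmt.Println("got token of length", len(token))
}
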
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "encoding/json" + "encoding/xml" + "fmt" + libvirt "github.com/libvirt/libvirt-go" + "log" +) + +type instance struct { + Name string `xml:"name"` + Owner struct { + User string `xml:"user"` + Project string `xml:"project"` + } `xml:"owner"` + Flavor struct { + Name string `xml:"name,attr"` + } `xml:"flavor"` +} + +type domain struct { + Name string `xml:"name"` + Devices struct { + Interfaces []struct { + Type string `xml:"type,attr"` + Mac struct { + Address string `xml:"address,attr"` + } `xml:"mac"` + Target struct { + Dev string `xml:"dev,attr"` + } `xml:"target"` + } `xml:"interface"` + } `xml:"devices"` +} + +type osVMAnnotation struct { + Name string + Owner string + Project string + Flavor string +} + +type osVMInterfaceAnnotation struct { + Type string + MacAddr string + Target string + VMName string +} + +func parseNovaMetadata(metadata string) (*osVMAnnotation, error) { + data := new(instance) + + if err := xml.Unmarshal([]byte(metadata), data); err != nil { + log.Println("XML Unmarshal error:", err) + return nil, err + } + log.Printf("Get name: %s user: %s, project: %s, flavor: %s", data.Name, data.Owner.User, data.Owner.Project, data.Flavor.Name) + return &osVMAnnotation{ + Name: data.Name, + Owner: data.Owner.User, + Project: data.Owner.Project, + Flavor: data.Flavor.Name}, nil +} + +func parseXMLForMAC(dumpxml string) (*[]osVMInterfaceAnnotation, error) { + data := new(domain) + + if err := xml.Unmarshal([]byte(dumpxml), data); err != nil { + log.Println("XML Unmarshal error:", err) + return nil, err + } + + ifAnnotation := make([]osVMInterfaceAnnotation, len(data.Devices.Interfaces)) + for i, v := range data.Devices.Interfaces { + log.Printf("Interface type: %s, mac_addr: %s, target_dev: %s", v.Type, v.Mac.Address, v.Target.Dev) + ifAnnotation[i] = osVMInterfaceAnnotation{ + Type: v.Type, + MacAddr: v.Mac.Address, + Target: v.Target.Dev, + VMName: data.Name} + } + return &ifAnnotation, nil +} + +func setInterfaceAnnotation(ifInfo *[]osVMInterfaceAnnotation, vmIfInfoChan chan string) { + for _, v := range *ifInfo { + ifInfoJSON, err := json.Marshal(v) + if err != nil { + log.Fatalf("Err: %v", err) + } + infoPool.Set(fmt.Sprintf("if/%s/%s", v.Target, "network"), string(ifInfoJSON)) + + vmIfInfoChan <- fmt.Sprintf("if/%s/%s", v.Target, "network") + } + return +} + +func domainEventLifecycleCallback(vmIfInfo chan string) func(c *libvirt.Connect, d *libvirt.Domain, event *libvirt.DomainEventLifecycle) { + + return func(c *libvirt.Connect, + d *libvirt.Domain, event *libvirt.DomainEventLifecycle) { + domName, _ := d.GetName() + + switch event.Event { + case libvirt.DOMAIN_EVENT_DEFINED: + // VM defined: vmname (libvirt, nova), user, project, flavor + // Redis: /vminfo + log.Printf("Event defined: domName: %s, event: %v", domName, event) + metadata, err := d.GetMetadata(libvirt.DOMAIN_METADATA_ELEMENT, "http://openstack.org/xmlns/libvirt/nova/1.0", libvirt.DOMAIN_AFFECT_CONFIG) + if err != nil { + log.Fatalf("Err: %v", err) + } + vmInfo, err := parseNovaMetadata(metadata) + if err != nil { + log.Fatalf("Err: %v", 
err) + } + vmInfoJSON, err := json.Marshal(vmInfo) + if err != nil { + log.Fatalf("Err: %v", err) + } + infoPool.Set(fmt.Sprintf("vm/%s/%s", domName, "vminfo"), string(vmInfoJSON)) + case libvirt.DOMAIN_EVENT_STARTED: + // VM started: interface type, interface mac addr, interface target dev + // Redis: /interfaces + log.Printf("Event started: domName: %s, event: %v", domName, event) + + xml, err := d.GetXMLDesc(0) + if err != nil { + log.Fatalf("Err: %v", err) + } + ifInfo, err := parseXMLForMAC(xml) + if err != nil { + log.Fatalf("Err: %v", err) + } + setInterfaceAnnotation(ifInfo, vmIfInfo) + + ifInfoJSON, err := json.Marshal(ifInfo) + if err != nil { + log.Fatalf("Err: %v", err) + } + infoPool.Set(fmt.Sprintf("vm/%s/%s", domName, "interfaces"), string(ifInfoJSON)) + case libvirt.DOMAIN_EVENT_UNDEFINED: + log.Printf("Event undefined: domName: %s, event: %v", domName, event) + vmIFInfo, err := infoPool.Get(fmt.Sprintf("vm/%s/%s", domName, "interfaces")) + if err != nil { + log.Fatalf("Err: %v", err) + } else { + var interfaces []osVMInterfaceAnnotation + err = json.Unmarshal([]byte(vmIFInfo), &interfaces) + if err != nil { + log.Fatalf("Err: %v", err) + } else { + for _, v := range interfaces { + infoPool.Del(fmt.Sprintf("if/%s/%s", v.Target, "network")) + infoPool.Del(fmt.Sprintf("if/%s/%s", v.Target, "neutron_network")) + } + } + } + infoPool.Del(fmt.Sprintf("vm/%s/%s", domName, "vminfo")) + infoPool.Del(fmt.Sprintf("vm/%s/%s", domName, "interfaces")) + default: + log.Printf("Event misc: domName: %s, event: %v", domName, event) + } + } +} + +// GetActiveDomain gets information about all active domains from libvirt. It should be called at startup +// to pick up domains that are already running. +func GetActiveDomain(conn *libvirt.Connect, vmIfInfoChan chan string) error { + doms, err := conn.ListAllDomains(libvirt.CONNECT_LIST_DOMAINS_ACTIVE) + if err != nil { + log.Fatalf("libvirt dom list error: %s", err) + return err + } + + for _, d := range doms { + name, err := d.GetName() + + // Get VM Info + metadata, err := d.GetMetadata(libvirt.DOMAIN_METADATA_ELEMENT, "http://openstack.org/xmlns/libvirt/nova/1.0", libvirt.DOMAIN_AFFECT_CONFIG) + if err != nil { + log.Fatalf("Err: %v", err) + return err + } + vmInfo, err := parseNovaMetadata(metadata) + if err != nil { + log.Fatalf("Err: %v", err) + return err + } + vmInfoJSON, err := json.Marshal(vmInfo) + if err != nil { + log.Fatalf("Err: %v", err) + return err + } + infoPool.Set(fmt.Sprintf("vm/%s/%s", name, "vminfo"), string(vmInfoJSON)) + + // Get Network info + xml, err := d.GetXMLDesc(0) + if err != nil { + log.Fatalf("Err: %v", err) + return err + } + ifInfo, err := parseXMLForMAC(xml) + if err != nil { + log.Fatalf("Err: %v", err) + return err + } + setInterfaceAnnotation(ifInfo, vmIfInfoChan) + + ifInfoJSON, err := json.Marshal(ifInfo) + if err != nil { + log.Fatalf("Err: %v", err) + return err + } + infoPool.Set(fmt.Sprintf("vm/%s/%s", name, "interfaces"), string(ifInfoJSON)) + } + return nil +} + +// RunVirshEventLoop runs the event loop that watches for libvirt domain lifecycle updates. +func RunVirshEventLoop(ctx context.Context, conn *libvirt.Connect, vmIfInfoChan chan string) error { + callbackID, err := conn.DomainEventLifecycleRegister(nil, domainEventLifecycleCallback(vmIfInfoChan)) + if err != nil { + log.Fatalf("Err: callbackid: %d %v", callbackID, err) + } + + libvirt.EventAddTimeout(5000, func(timer int) { return }) // 5000 = 5sec + log.Printf("Entering libvirt event loop()") +EVENTLOOP: + for { + select { + case <-ctx.Done(): + break EVENTLOOP + default: + if err
:= libvirt.EventRunDefaultImpl(); err != nil { + log.Fatalf("%v", err) + } + } + } + log.Printf("Quitting libvirt event loop()") + + if err := conn.DomainEventDeregister(callbackID); err != nil { + log.Fatalf("%v", err) + } + return nil +} diff --git a/src/dma/cmd/server/agent.go b/src/dma/cmd/server/agent.go new file mode 100644 index 00000000..ffcb4a97 --- /dev/null +++ b/src/dma/cmd/server/agent.go @@ -0,0 +1,31 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" +) + +func createCollectdConf() error { + outStatus, errStatus := exec.Command("ssh", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", "localhost", "sudo", "systemctl", "status", "collectd").Output() + if errStatus != nil { + return fmt.Errorf("status NG") + } + if !strings.Contains(string(outStatus), "running") { + return fmt.Errorf("status not running") + } + + _, errStop := exec.Command("ssh", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", "localhost", "sudo", "systemctl", "stop", "collectd").Output() + if errStop != nil { + return fmt.Errorf("stop NG") + } + + _, errStart := exec.Command("ssh", "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", "localhost", "sudo", "systemctl", "start", "collectd").Output() + if errStart != nil { + return fmt.Errorf("start NG") + } + + fmt.Println("All complete!") + + return nil +} diff --git a/src/dma/cmd/server/amqp.go b/src/dma/cmd/server/amqp.go new file mode 100644 index 00000000..4d080fe1 --- /dev/null +++ b/src/dma/cmd/server/amqp.go @@ -0,0 +1,108 @@ +/* + * Copyright 2017 NEC Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "context" + "github.com/streadway/amqp" + "log" + "os" + "strings" +) + +func failOnError(err error, msg string) { + if err != nil { + log.Fatalf("%s: %s", msg, err) + } +} + +func runSubscriber(ctx context.Context, config *Config) { + confDirPath := config.Server.CollectdConfDir + amqpURL := "amqp://" + config.Server.AmqpUser + ":" + config.Server.AmqpPassword + "@" + config.Server.AmqpHost + ":" + config.Server.AmqpPort + "/" + conn, err := amqp.Dial(amqpURL) + failOnError(err, "Failed to connect to RabbitMQ") + + defer conn.Close() + + ch, err := conn.Channel() + failOnError(err, "Failed to open a channel") + defer ch.Close() + + err = ch.ExchangeDeclare( + "collectd-conf", // name + "fanout", // type + false, // durable + false, // auto-deleted + false, // internal + false, // no-wait + nil, // arguments + ) + failOnError(err, "Failed to declare an exchange") + + q, err := ch.QueueDeclare( + "", // name + false, // durable + false, // delete when unused + true, // exclusive + false, // no-wait + nil, // arguments + ) + failOnError(err, "Failed to declare a queue") + + err = ch.QueueBind( + q.Name, // queue name + "", // routing key + "collectd-conf", // exchange + false, + nil) + failOnError(err, "Failed to bind a queue") + + msgs, err := ch.Consume( + q.Name, // queue + "", // consumer + true, // auto-ack + false, // exclusive + false, // no-local + false, // no-wait + nil, // args + ) + failOnError(err, "Failed to register a consumer") + +EVENTLOOP: + for { + select { + case <-ctx.Done(): + break EVENTLOOP + case d, ok := <-msgs: + if ok { + dataText := strings.SplitN(string(d.Body), "/", 2) + + dst, err := os.Create(confDirPath + "/" + dataText[0]) + failOnError(err, "File create NG") + defer dst.Close() + + dst.Write(([]byte)(dataText[1])) + + err = createCollectdConf() + failOnError(err, "collectd conf NG") + + log.Printf(" [x] %s", d.Body) + } + } + } + +} diff --git a/src/dma/cmd/server/api.go b/src/dma/cmd/server/api.go new file mode 100644 index 00000000..e8add0a1 --- /dev/null +++ b/src/dma/cmd/server/api.go @@ -0,0 +1,85 @@ +/* + * Copyright 2017 NEC Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "context" + "fmt" + "github.com/labstack/echo" + "io" + "net/http" + "os" + "time" +) + +func runAPIServer(ctx context.Context, config *Config) { + confDirPath := config.Server.CollectdConfDir + e := echo.New() + + e.GET("/", func(c echo.Context) error { + return c.String(http.StatusOK, "GET OK") + }) + e.POST("/collectd/conf", func(c echo.Context) error { + + file, err := c.FormFile("file") + if err != nil { + return c.String(http.StatusInternalServerError, "file send NG") + } + + src, err := file.Open() + if err != nil { + return c.String(http.StatusInternalServerError, "file open NG") + } + defer src.Close() + + dst, err := os.Create(confDirPath + "/" + file.Filename) + if err != nil { + return c.String(http.StatusInternalServerError, "file create NG") + } + defer dst.Close() + + // Copy + if _, err = io.Copy(dst, src); err != nil { + return c.String(http.StatusInternalServerError, "file write NG") + } + + err = createCollectdConf() + if err != nil { + errstr := fmt.Sprintf("collectd conf NG:%v", err) + return c.String(http.StatusInternalServerError, errstr) + } + return c.String(http.StatusCreated, "collectd conf OK") + }) + + // Start server + go func() { + urlStr := ":" + config.Server.ListenPort + if err := e.Start(urlStr); err != nil { + e.Logger.Info("shutting down the server") + } + }() + + // Wait for context.Done() to gracefully shutdown the server with + // a timeout of 10 seconds. + <-ctx.Done() + ctxShutdown, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := e.Shutdown(ctxShutdown); err != nil { + e.Logger.Fatal(err) + } + +} diff --git a/src/dma/cmd/server/main.go b/src/dma/cmd/server/main.go new file mode 100644 index 00000000..2e028fa4 --- /dev/null +++ b/src/dma/cmd/server/main.go @@ -0,0 +1,85 @@ +/* + * Copyright 2017 NEC Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "flag" + "github.com/BurntSushi/toml" + "log" + "os" + "sync" +) + +var serverTypeOpt = flag.String("type", "both", "server type: both(default), pubsub, rest") + +// Config is ... +type Config struct { + Server ServerConfig +} + +// ServerConfig is ... 
+type ServerConfig struct { + ListenPort string `toml:"listen_port"` + + AmqpHost string `toml:"amqp_host"` + AmqpUser string `toml:"amqp_user"` + AmqpPassword string `toml:"amqp_password"` + AmqpPort string `toml:"amqp_port"` + + CollectdConfDir string `toml:"collectd_confdir"` +} + +func main() { + + var config Config + _, err := toml.DecodeFile("/etc/barometer-dma/config.toml", &config) + if err != nil { + log.Fatalf("Read error of config file") + } + + if f, err := os.Stat(config.Server.CollectdConfDir); os.IsNotExist(err) || !f.IsDir() { + log.Fatalf("Path \"%s\" is not a directory", config.Server.CollectdConfDir) + } + + var waitgroup sync.WaitGroup + + flag.Parse() + + if *serverTypeOpt == "both" || *serverTypeOpt == "pubsub" { + ctx := context.Background() + waitgroup.Add(1) + go func() { + defer waitgroup.Done() + runSubscriber(ctx, &config) + }() + log.Printf("Waiting for publish.") + } + + if *serverTypeOpt == "both" || *serverTypeOpt == "rest" { + ctx := context.Background() + waitgroup.Add(1) + go func() { + defer waitgroup.Done() + runAPIServer(ctx, &config) + }() + log.Printf("Waiting for REST.") + } + + waitgroup.Wait() + log.Printf("Server stop.") +} diff --git a/src/dma/cmd/threshold/evaluate.go b/src/dma/cmd/threshold/evaluate.go new file mode 100644 index 00000000..8c961253 --- /dev/null +++ b/src/dma/cmd/threshold/evaluate.go @@ -0,0 +1,37 @@ +/* + * Copyright 2018 NEC Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +func evaluate(config *Config, rdlist []rawData) []evalData { + edlist := []evalData{} + + for _, rd := range rdlist { + maxVal := 0 + for _, val := range rd.datalist { + if maxVal < val { + maxVal = val + } + } + + if maxVal > config.Threshold.Min { + edlist = append(edlist, evalData{rd.key, 1}) + } else { + edlist = append(edlist, evalData{rd.key, 0}) + } + } + return edlist +} diff --git a/src/dma/cmd/threshold/main.go b/src/dma/cmd/threshold/main.go new file mode 100644 index 00000000..b98328d1 --- /dev/null +++ b/src/dma/cmd/threshold/main.go @@ -0,0 +1,107 @@ +/* + * Copyright 2018 NEC Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "fmt" + "github.com/BurntSushi/toml" + "log" + "sync" + "time" +) + +// Config is ... +type Config struct { + Common CommonConfig + Threshold ThresholdConfig +} + +// CommonConfig is ... 
+type CommonConfig struct { + RedisHost string `toml:"redis_host"` + RedisPort string `toml:"redis_port"` + RedisPassword string `toml:"redis_password"` + RedisDB int `toml:"redis_db"` +} + +// ThresholdConfig is ... +type ThresholdConfig struct { + RedisHost string `toml:"redis_host"` + RedisPort string `toml:"redis_port"` + RedisPassword string `toml:"redis_password"` + RedisDB int `toml:"redis_db"` + + Interval int `toml:"interval"` + Min int `toml:"min"` + + CollectdPlugin string `toml:"collectd_plugin"` + CollectdType string `toml:"collectd_type"` +} + +type rawData struct { + key string + datalist []int +} + +type evalData struct { + key string + label int +} + +func main() { + var config Config + _, err := toml.DecodeFile("/etc/barometer-dma/config.toml", &config) + if err != nil { + log.Fatalf("Read error of config: %s", err) + } + + thresConfig := config.Threshold + log.Printf("Raw data redis config Addr:%s:%s DB:%d", thresConfig.RedisHost, thresConfig.RedisPort, thresConfig.RedisDB) + if thresConfig.RedisPassword == "" { + log.Printf("Raw data redis password is not set") + } + annoConfig := config.Common + log.Printf("Annotate redis config Addr:%s:%s DB:%d", annoConfig.RedisHost, annoConfig.RedisPort, annoConfig.RedisDB) + if annoConfig.RedisPassword == "" { + log.Printf("Annotate redis password is not set") + } + + var waitgroup sync.WaitGroup + ctx := context.Background() + + waitgroup.Add(1) + go func() { + defer waitgroup.Done() + ticker := time.NewTicker(time.Duration(config.Threshold.Interval) * time.Second) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + result1 := read(&config) + // analysis() + result2 := evaluate(&config, result1) + transmit(&config, result2) + } + } + }() + + waitgroup.Wait() + + fmt.Println("End") +} diff --git a/src/dma/cmd/threshold/read.go b/src/dma/cmd/threshold/read.go new file mode 100644 index 00000000..0b0cf5a1 --- /dev/null +++ b/src/dma/cmd/threshold/read.go @@ -0,0 +1,82 @@ +/* + * Copyright 2018 NEC Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "fmt" + "github.com/go-redis/redis" + "os" + "strconv" + "strings" + "time" +) + +// e.g. 
collectd/instance-00000001/virt/if_octets-tapd21acb51-35 +const redisKey = "collectd/*/virt/if_octets-*" + +func zrangebyscore(config *Config, client *redis.Client, key string) []int { + + unixNow := int(time.Now().Unix()) + + val, err := client.ZRangeByScore(key, redis.ZRangeBy{ + Min: strconv.Itoa(unixNow - config.Threshold.Interval), + Max: strconv.Itoa(unixNow), + }).Result() + + datalist := []int{} + + if err == redis.Nil { + fmt.Println("this key is not exist") + os.Exit(1) + } else if err != nil { + panic(err) + } else { + for _, strVal := range val { + split := strings.Split(strVal, ":") + txVal := split[2] + floatVal, err := strconv.ParseFloat(txVal, 64) + if err != nil { + os.Exit(1) + } + datalist = append(datalist, int(floatVal)) + } + } + return datalist +} + +func read(config *Config) []rawData { + thresConfig := config.Threshold + + client := redis.NewClient(&redis.Options{ + Addr: thresConfig.RedisHost + ":" + thresConfig.RedisPort, + Password: thresConfig.RedisPassword, + DB: thresConfig.RedisDB, + }) + + keys, err := client.Keys(redisKey).Result() + if err != nil { + panic(err) + } + + rdlist := []rawData{} + + for _, key := range keys { + rdlist = append(rdlist, rawData{key, zrangebyscore(config, client, key)}) + } + + return rdlist +} diff --git a/src/dma/cmd/threshold/transmit.go b/src/dma/cmd/threshold/transmit.go new file mode 100644 index 00000000..8cac2a88 --- /dev/null +++ b/src/dma/cmd/threshold/transmit.go @@ -0,0 +1,97 @@ +/* + * Copyright 2018 NEC Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "bytes" + "fmt" + "github.com/distributed-monitoring/agent/pkg/common" + "github.com/go-redis/redis" + "strconv" + "strings" + "time" +) + +type collectdNotifier struct { + pluginName string + typeName string +} + +func send(cn collectdNotifier, message string, severity string, metaData [][2]string) error { + unixNow := float64(time.Now().UnixNano()) / 1000000000 + + var metaDataStr bytes.Buffer + for _, data := range metaData { + metaDataStr.WriteString(" s:") + metaDataStr.WriteString(data[0]) + metaDataStr.WriteString("=\"") + metaDataStr.WriteString(strings.Replace(data[1], "\"", "\\\"", -1)) + metaDataStr.WriteString("\"") + } + + fmt.Printf("PUTNOTIF message=\"%s\" severity=%s time=%f "+ + "host=localhost plugin=%s type=%s %s\n", + message, severity, unixNow, cn.pluginName, cn.typeName, metaDataStr.String()) + + return nil +} + +func transmit(config *Config, edlist []evalData) { + annoConfig := config.Common + + client := redis.NewClient(&redis.Options{ + Addr: annoConfig.RedisHost + ":" + annoConfig.RedisPort, + Password: annoConfig.RedisPassword, + DB: annoConfig.RedisDB, + }) + pool := common.RedisPool{Client: client} + + notifier := collectdNotifier{ + pluginName: config.Threshold.CollectdPlugin, + typeName: config.Threshold.CollectdType} + + for _, ed := range edlist { + if ed.label == 1 { + + fmt.Println("kick action") + + item := strings.Split(ed.key, "/") + ifItem := strings.SplitN(item[3], "-", 2) + virtName := item[1] + virtIF := ifItem[1] + + var message bytes.Buffer + message.WriteString("Value exceeded threshold ") + message.WriteString(strconv.Itoa(config.Threshold.Min)) + message.WriteString(".") + + nameVal, _ := pool.Get(fmt.Sprintf("%s/%s/vminfo", "vm", virtName)) + ifVal, _ := pool.Get(fmt.Sprintf("%s/%s/neutron_network", "if", virtIF)) + + nameInfo := fmt.Sprintf("{\"%s\": %s}", virtName, nameVal) + ifInfo := fmt.Sprintf("{\"%s\": %s}", virtIF, ifVal) + + fmt.Println(nameInfo) + fmt.Println(ifInfo) + + send(notifier, message.String(), + "warning", + [][2]string{{"vminfo", nameInfo}, {"neutron_network", ifInfo}}) + + } + } +} diff --git a/src/dma/examples/config.toml b/src/dma/examples/config.toml new file mode 100644 index 00000000..ccddc759 --- /dev/null +++ b/src/dma/examples/config.toml @@ -0,0 +1,42 @@ +[common] +# All daemons share redis DB for annotation etc. 
+redis_host = "localhost" +redis_port = "6379" +redis_password = "" +redis_db = 0 + + +[server] +listen_port = "12345" + +amqp_host = "overcloud-controller-0.internalapi" +amqp_user = "guest" +amqp_password = "xxxxxxxxxxxxxxxxxxxxxxxxx" +amqp_port = "5672" + +collectd_confdir = "/etc/collectd/collectd.conf.d" + + +[infofetch] +os_username = "admin" +os_user_domain_name = "Default" +os_project_domain_name = "Default" +os_project_name = "admin" +os_password = "xxxxxxxxxxxxxxxxxxxxxxxxx" +os_auth_url = "http://overcloud-controller-0.internalapi:5000/v3" + + +[threshold] +# redis for raw metrics data +redis_host = "localhost" +redis_port = "6379" +redis_password = "" +redis_db = 0 + +# exceed by exec "sudo ping -i 0.00005 -c 1000 -s 1000 -q " +interval = 5 +min = 1000000 + +collectd_plugin = "barometer-dma" +collectd_type = "if-octets-threshold" + diff --git a/src/dma/pkg/common/redispool.go b/src/dma/pkg/common/redispool.go new file mode 100644 index 00000000..3c19a195 --- /dev/null +++ b/src/dma/pkg/common/redispool.go @@ -0,0 +1,72 @@ +/* + * Copyright 2018 NEC Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "github.com/go-redis/redis" + "log" +) + +// RedisPool is an implementation of Pool by redis. +type RedisPool struct { + Client *redis.Client +} + +// Set is to set data in redis. +func (thisPool RedisPool) Set(key string, data string) error { + key = redisLabel + "/" + key + err := thisPool.Client.Set(key, data, 0).Err() + if err != nil { + log.Printf("redis Set error: %s", err) + } + return err +} + +// Get is to get data in redis. +func (thisPool RedisPool) Get(key string) (string, error) { + key = redisLabel + "/" + key + value, err := thisPool.Client.Get(key).Result() + if err != nil { + log.Printf("redis Get error: %s", err) + } + return value, err +} + +// Del is to delete data in redis. +func (thisPool RedisPool) Del(key string) error { + key = redisLabel + "/" + key + err := thisPool.Client.Del(key).Err() + if err != nil { + log.Printf("redis Del error: %s", err) + } + return err +} + +// DelAll is to delete all data, begins with , in redis. +func (thisPool RedisPool) DelAll() error { + pattern := redisLabel + "/*" + + keys, err := thisPool.Client.Keys(pattern).Result() + if err != nil { + log.Printf("redis Keys error :%s", err) + } + + for _, v := range keys { + thisPool.Client.Del(v) + } + return err +} diff --git a/src/dma/pkg/common/types.go b/src/dma/pkg/common/types.go new file mode 100644 index 00000000..98f605e9 --- /dev/null +++ b/src/dma/pkg/common/types.go @@ -0,0 +1,29 @@ +/* + * Copyright 2018 NEC Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +// redisLabel is prefix of local-agent for redis +const redisLabel = "barometer-dma" + +// Pool is an interface of DB pool to annotate. +// e.g. Set("virt_name/instance-00000001", "{"OS-name": "testvm1"}") +// e.g. Set("virt_if/tap1e793b2b-8e", "{"OS-uuid": "df846647-c16a-4d8a-842a-ac39bd4a971e"}") +type Pool interface { + Set(string, string) error // (key, JsonData) + Get(string) (string, error) // (key, infoName) + Del(string) error // (key) +} diff --git a/src/dma/sampleconf/demo-memory-linearreg/analysis.conf b/src/dma/sampleconf/demo-memory-linearreg/analysis.conf new file mode 100644 index 00000000..33faee1d --- /dev/null +++ b/src/dma/sampleconf/demo-memory-linearreg/analysis.conf @@ -0,0 +1,12 @@ +LoadPlugin python + + ModulePath "/opt/dma/lib" + LogTraces true + Interactive false + Import "analysis" + #Import "write_nova-migrate" + + + + + diff --git a/src/dma/sampleconf/demo-memory-linearreg/evaluator.conf b/src/dma/sampleconf/demo-memory-linearreg/evaluator.conf new file mode 100644 index 00000000..59af25b4 --- /dev/null +++ b/src/dma/sampleconf/demo-memory-linearreg/evaluator.conf @@ -0,0 +1,35 @@ +LoadPlugin threshold + + + + + WarningMax 7000000000 + Hits 5 + + + + + +LoadPlugin match_regex +LoadPlugin match_value + + + + Plugin "dma" + #Type "^memory$" + #TypeInstance "^show_" + + + Min 7000000000 + + + Plugin "write_http/mynode" + + + + Plugin "write_redis/mynode" + + + Plugin "threshold" + + diff --git a/src/dma/sampleconf/demo-memory-linearreg/read-metrics.conf b/src/dma/sampleconf/demo-memory-linearreg/read-metrics.conf new file mode 100644 index 00000000..1926aaa5 --- /dev/null +++ b/src/dma/sampleconf/demo-memory-linearreg/read-metrics.conf @@ -0,0 +1,7 @@ + + Interval 0.1 + + + ValuesAbsolute true + ValuesPercentage false + diff --git a/src/dma/sampleconf/demo-memory-linearreg/transmitter.conf b/src/dma/sampleconf/demo-memory-linearreg/transmitter.conf new file mode 100644 index 00000000..e9d5bb19 --- /dev/null +++ b/src/dma/sampleconf/demo-memory-linearreg/transmitter.conf @@ -0,0 +1,11 @@ +LoadPlugin write_http + + + URL "http://192.0.2.11:12345/failure" + Format "Json" +# Select Metrics or Notification + Metrics false + Notifications true + BufferSize 1024 + + diff --git a/src/dma/sampleconf/demo-memory-linearreg/write-redis.conf b/src/dma/sampleconf/demo-memory-linearreg/write-redis.conf new file mode 100644 index 00000000..348ad02f --- /dev/null +++ b/src/dma/sampleconf/demo-memory-linearreg/write-redis.conf @@ -0,0 +1,10 @@ +LoadPlugin write_redis + + + Host "localhost" + Port "6379" + Timeout 2000 + MaxSetDuration 60 + + + diff --git a/src/dma/sampleconf/simple-threshold/evaluator.conf b/src/dma/sampleconf/simple-threshold/evaluator.conf new file mode 100644 index 00000000..9ebb65fc --- /dev/null +++ b/src/dma/sampleconf/simple-threshold/evaluator.conf @@ -0,0 +1,7 @@ + + Interval 1 + + + Exec "heat-admin:heat-admin" "sudo" "-i" "threshold" + + diff --git a/src/dma/sampleconf/simple-threshold/read-metrics.conf b/src/dma/sampleconf/simple-threshold/read-metrics.conf new file mode 100644 index 00000000..d3960be0 --- 
/dev/null +++ b/src/dma/sampleconf/simple-threshold/read-metrics.conf @@ -0,0 +1,17 @@ +# +# Interval 0.1 +# + +# +# Interface "eth0" +# IgnoreSelected false +# ReportInactive true +# UniqueName false +# + + + Interval 0.1 + + + Connection "qemu:///system" + diff --git a/src/dma/sampleconf/simple-threshold/transmitter.conf b/src/dma/sampleconf/simple-threshold/transmitter.conf new file mode 100644 index 00000000..29a186e4 --- /dev/null +++ b/src/dma/sampleconf/simple-threshold/transmitter.conf @@ -0,0 +1,11 @@ +LoadPlugin write_http + + + URL "http://overcloud-controller-0.internalapi:12345/failure" + Format "Json" +# Select Metrics or Notification + Metrics false + Notifications true + BufferSize 1024 + + diff --git a/src/dma/sampleconf/simple-threshold/write-redis.conf b/src/dma/sampleconf/simple-threshold/write-redis.conf new file mode 100644 index 00000000..84eb53d2 --- /dev/null +++ b/src/dma/sampleconf/simple-threshold/write-redis.conf @@ -0,0 +1,10 @@ +LoadPlugin write_redis + + + Host "localhost" + Port "6379" + Timeout 2000 + #MaxSetDuration 86400 + + + diff --git a/src/dma/vendor/github.com/BurntSushi/toml/.gitignore b/src/dma/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 00000000..0cd38003 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,5 @@ +TAGS +tags +.*.swp +tomlcheck/tomlcheck +toml.test diff --git a/src/dma/vendor/github.com/BurntSushi/toml/.travis.yml b/src/dma/vendor/github.com/BurntSushi/toml/.travis.yml new file mode 100644 index 00000000..8b8afc4f --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip +install: + - go install ./... + - go get github.com/BurntSushi/toml-test +script: + - export PATH="$PATH:$HOME/gopath/bin" + - make test diff --git a/src/dma/vendor/github.com/BurntSushi/toml/COMPATIBLE b/src/dma/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 00000000..6efcfd0c --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1,3 @@ +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) + diff --git a/src/dma/vendor/github.com/BurntSushi/toml/COPYING b/src/dma/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 00000000..5a8e3325 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/src/dma/vendor/github.com/BurntSushi/toml/Makefile b/src/dma/vendor/github.com/BurntSushi/toml/Makefile new file mode 100644 index 00000000..3600848d --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/Makefile @@ -0,0 +1,19 @@ +install: + go install ./... 
+ +test: install + go test -v + toml-test toml-test-decoder + toml-test -encoder toml-test-encoder + +fmt: + gofmt -w *.go */*.go + colcheck *.go */*.go + +tags: + find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS + +push: + git push origin master + git push github master + diff --git a/src/dma/vendor/github.com/BurntSushi/toml/README.md b/src/dma/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 00000000..7c1b37ec --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,218 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Spec: https://github.com/toml-lang/toml + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Documentation: https://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. 
+ +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. 
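The DMA commands added by this patch consume the package in exactly this way: `cmd/server/main.go` and `cmd/threshold/main.go` call `toml.DecodeFile("/etc/barometer-dma/config.toml", &config)` and let the `toml:"..."` struct tags map the `[server]`, `[threshold]` and `[common]` tables of `examples/config.toml` onto Go structs. A minimal sketch of that pattern (the struct below is trimmed to a subset of the real `ServerConfig`; the path is the one hard-coded in `main.go`):

```go
package main

import (
	"log"

	"github.com/BurntSushi/toml"
)

// ServerConfig mirrors part of the [server] table in examples/config.toml.
type ServerConfig struct {
	ListenPort      string `toml:"listen_port"`
	AmqpHost        string `toml:"amqp_host"`
	CollectdConfDir string `toml:"collectd_confdir"`
}

// Config is the top-level document; the [server] table maps to the Server field.
type Config struct {
	Server ServerConfig
}

func main() {
	var config Config
	// DecodeFile reads the file and unmarshals it via reflection,
	// matching TOML keys against the toml struct tags above.
	if _, err := toml.DecodeFile("/etc/barometer-dma/config.toml", &config); err != nil {
		log.Fatalf("read error of config file: %v", err)
	}
	log.Printf("listen_port=%s collectd_confdir=%s",
		config.Server.ListenPort, config.Server.CollectdConfDir)
}
```

Keys present in the file but absent from the structs are silently ignored; the returned `MetaData` (its `Undecoded` method in particular) can be inspected if stricter validation is wanted.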
diff --git a/src/dma/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/src/dma/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING new file mode 100644 index 00000000..5a8e3325 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/src/dma/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/src/dma/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING new file mode 100644 index 00000000..5a8e3325 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/src/dma/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/src/dma/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING new file mode 100644 index 00000000..5a8e3325 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/src/dma/vendor/github.com/BurntSushi/toml/decode.go b/src/dma/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 00000000..b0fd51d5 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,509 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. +// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. 
They can be useful when you don't know the +// exact type of TOML data until run time. +type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. +// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. 
+func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. +func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. In particular, the unmarshaler should only be applied + // to primitive TOML values. But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). 
+ + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(TextUnmarshaler); ok { + return true + } + return false +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/src/dma/vendor/github.com/BurntSushi/toml/decode_meta.go b/src/dma/vendor/github.com/BurntSushi/toml/decode_meta.go new file mode 100644 index 00000000..b9914a67 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -0,0 +1,121 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not +// be inferrable via reflection. In particular, whether a key has been defined +// and the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined returns true if the key given exists in the TOML data. The key +// should be specified hierarchially. e.g., +// +// // access the TOML key 'a.b.c' +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key given. Keys are case sensitive. 
+func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/src/dma/vendor/github.com/BurntSushi/toml/doc.go b/src/dma/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 00000000..b371f396 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/toml-lang/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. 
These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. +*/ +package toml diff --git a/src/dma/vendor/github.com/BurntSushi/toml/encode.go b/src/dma/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 00000000..d905c21a --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,568 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "toml: cannot encode array with mixed element types") + errArrayNilElement = errors.New( + "toml: cannot encode array with nil element") + errNonString = errors.New( + "toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New( + "toml: cannot encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "toml: TOML array element cannot contain a table") + errNoKey = errors.New( + "toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. +// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. +// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) 
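+// Editor's sketch (illustration only, not upstream text): a minimal use of
+// the encoder defined below, assuming a hypothetical config struct; imports
+// of "os" and "log" are omitted.
+//
+//	type config struct {
+//		Title string `toml:"title"`
+//		Ports []int  `toml:"ports"`
+//	}
+//	cfg := config{Title: "demo", Ports: []int{8080, 8081}}
+//	if err := toml.NewEncoder(os.Stdout).Encode(cfg); err != nil {
+//		log.Fatal(err)
+//	}
+//	// Output:
+//	//   title = "demo"
+//	//   ports = [8080, 8081]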
+func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we used that. + // Basically, this prevents the encoder for handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). + switch rv.Interface().(type) { + case time.Time, TextMarshaler: + enc.keyEqElement(key, rv) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.keyEqElement(key, rv) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.keyEqElement(key, rv) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + panic(e("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element (primitives and +// arrays). +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: + // Special case time.Time as a primitive. Has to come before + // TextMarshaler below because time.Time implements + // encoding.TextMarshaler, but we need to always use UTC. + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + return + case TextMarshaler: + // Special case. Use text marshaler if it's available for this value. + if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + switch rv.Kind() { + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + case reflect.Float64: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Interface: + enc.eElement(rv.Elem()) + case reflect.String: + enc.writeQuoted(rv.String()) + default: + panic(e("unexpected primitive type: %s", rv.Kind())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one +// number on either side. 
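+// Editor's note (illustration only): strconv.FormatFloat(3, 'f', -1, 64)
+// yields "3", which floatAddDecimal below turns into "3.0" so the emitted
+// float remains valid TOML.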
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + panicIfInvalidKey(key) + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv) + case reflect.Struct: + enc.eStruct(key, rv) + default: + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string) { + sort.Strings(mapKeys) + for _, mapKey := range mapKeys { + mrv := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(mrv) { + // Don't write anything for nil fields. + continue + } + enc.encode(key.add(mapKey), mrv) + } + } + writeMapKeys(mapKeysDirect) + writeMapKeys(mapKeysSub) +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table, then all keys under it will be in that + // table (not the one we're writing here). + rt := rv.Type() + var fieldsDirect, fieldsSub [][]int + var addFields func(rt reflect.Type, rv reflect.Value, start []int) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + // skip unexported fields + if f.PkgPath != "" && !f.Anonymous { + continue + } + frv := rv.Field(i) + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. + if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), f.Index) + } + continue + } + // Fall through to the normal field encoding logic below + // for non-struct anonymous fields. 
+ } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + var writeFields = func(fields [][]int) { + for _, fieldIndex := range fields { + sft := rt.FieldByIndex(fieldIndex) + sf := rv.FieldByIndex(fieldIndex) + if isNil(sf) { + // Don't write anything for nil fields. + continue + } + + opts := getOptions(sft.Tag) + if opts.skip { + continue + } + keyName := sft.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(sf) { + continue + } + if opts.omitzero && isZero(sf) { + continue + } + + enc.encode(key.add(keyName), sf) + } + } + writeFields(fieldsDirect) + writeFields(fieldsSub) +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. + +// Returns the TOML type of a Go value. The type may be `nil`, which means +// no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case TextMarshaler: + return tomlString + default: + return tomlHash + } + default: + panic("unexpected reflect.Kind: " + rv.Kind().String()) + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slize). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + elem := rv.Index(i) + switch elemType := tomlTypeOfGo(elem); { + case elemType == nil: + encPanic(errArrayNilElement) + case !typeEqual(firstType, elemType): + encPanic(errArrayMixedElementTypes) + } + } + // If we have a nested array, then we must make sure that the nested + // array contains ONLY primitives. + // This checks arbitrarily nested arrays. 
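+ // Editor's note (illustration only): a value like [][]int passes this
+ // check, while [][]map[string]string panics with errArrayNoTable,
+ // matching the restriction documented on Encode above.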
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/src/dma/vendor/github.com/BurntSushi/toml/encoding_types.go b/src/dma/vendor/github.com/BurntSushi/toml/encoding_types.go new file mode 100644 index 00000000..d36e1dd6 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. 
+type TextUnmarshaler encoding.TextUnmarshaler diff --git a/src/dma/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/src/dma/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go new file mode 100644 index 00000000..e8d503d0 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/src/dma/vendor/github.com/BurntSushi/toml/lex.go b/src/dma/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 00000000..6dee7fc7 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,953 @@ +package toml + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to three runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [3]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
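+ // Editor's note (illustration only): while lexing `a = [1, 2]`, the
+ // array states push lexArrayValueEnd before handing each element to
+ // lexValue, so pop() returns control to the array once the element
+ // has been emitted.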
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 3 { + lx.nprev++ + } + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called only twice between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) 
It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, "+ + "comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf("expected end of table array name delimiter %q, "+ + "but got %q instead", arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name " + + "(table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator " + + "(table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + default: + return lexBareTableName + } +} + +// lexBareTableName lexes the name of a table. It assumes that at least one +// valid character for the table has already been read. +func lexBareTableName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareTableName + } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, "+ + "but got %q instead", r) + } +} + +// lexKeyStart consumes a key name up until the first non-whitespace character. +// lexKeyStart will ignore whitespace. +func lexKeyStart(lx *lexer) stateFn { + r := lx.peek() + switch { + case r == keySep: + return lx.errorf("unexpected key separator %q", keySep) + case isWhitespace(r) || isNL(r): + lx.next() + return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey + } +} + +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. 
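+// Editor's note (illustration only): per isBareKeyChar at the bottom of this
+// file, a key such as server_name-1 lexes bare, while a key containing
+// spaces or dots must be quoted, e.g. "my key".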
+func lexBareKey(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareKey + case isWhitespace(r): + lx.backup() + lx.emit(itemText) + return lexKeyEnd + case r == keySep: + lx.backup() + lx.emit(itemText) + return lexKeyEnd + default: + return lx.errorf("bare keys cannot contain %q", r) + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case r == keySep: + return lexSkip(lx, lexValue) + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + default: + return lx.errorf("expected key separator %q, but got %q instead", + keySep, r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case inlineTableStart: + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '+', '-': + return lexNumberStart + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == arrayEnd: + // NOTE(caleb): The spec isn't clear about whether you can have + // a trailing comma or not, so we'll allow it. + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. 
+func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %q instead", + arrayEnd, r, + ) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + } + return lx.errorf("expected a comma or an inline table terminator %q, "+ + "but got %q instead", inlineTableEnd, r) +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case '\\': + return lexMultilineStringEscape + case stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. 
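+// Editor's note (illustration only): in a raw string such as
+// 'C:\Users\example' the backslashes are kept literally; there are no
+// escape sequences to process.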
+func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following "+ + "escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either an integer, a float, or datetime. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', 'T', ':', '.', 'Z': + return lexDatetime + } + + lx.backup() + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. 
It assumes that a sign +// has already been read, but that *no* digits have been consumed. +// lexNumberStart will move to the appropriate integer or float states. +func lexNumberStart(lx *lexer) stateFn { + // We MUST see a digit. Even floats have to start with a digit. + r := lx.next() + if !isDigit(r) { + if r == '.' { + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) + } + return lexNumber +} + +// lexNumber consumes an integer or a float after seeing the first digit. +func lexNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumber + } + switch r { + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + r := lx.peek() + if isNL(r) || r == eof { + lx.emit(itemText) + return lx.pop() + } + lx.next() + return lexComment +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + return func(lx *lexer) stateFn { + lx.ignore() + return nextState + } +} + +// isWhitespace returns true if `r` is a whitespace character according +// to the spec. 
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/src/dma/vendor/github.com/BurntSushi/toml/parse.go b/src/dma/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 00000000..50869ef9 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,592 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. + ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. 
+ implicits map[string]bool +} + +type parseError string + +func (pe parseError) Error() string { + return string(pe) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(parseError); ok { + return + } + panic(r) + } + }() + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + p.approxLine, p.current(), fmt.Sprintf(format, v...)) + panic(parseError(msg)) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemTableEnd, kg.typ) + + p.establishContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemArrayTableEnd, kg.typ) + + p.establishContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: + kname := p.next() + p.approxLine = kname.line + p.currentKey = p.keyString(kname) + + val, typ := p.value(p.next()) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
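+// Editor's note (illustration only): an itemInteger "42" becomes int64(42),
+// an itemBool "true" becomes true, and an itemDatetime such as "2018-09-06"
+// becomes a time.Time, each paired with its tomlType.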
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' 
must be followed "+ + "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + case itemInlineTableStart: + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + p.currentKey = "" + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ != itemKeyStart { + p.bug("Expected key start but instead found %q, around line %d", + it.val, p.approxLine) + } + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + // retrieve key + k := p.next() + p.approxLine = k.line + kname := p.keyString(k) + + // retrieve value + p.currentKey = kname + val, typ := p.value(p.next()) + // make sure we keep metadata up to date + p.setType(kname, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[kname] = val + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. 
+ if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 5) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as "+ + "an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var tmpHash interface{} + var ok bool + + hash := p.mapping + keyContext := make(Key, 0) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.bug("Expected hash to have type 'map[string]interface{}', but "+ + "it has '%T' instead.", tmpHash) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Typically, if the given key has already been set, then we have + // to raise an error since duplicate keys are disallowed. However, + // it's possible that a key was previously defined implicitly. In this + // case, it is allowed to be redefined concretely. (See the + // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). 
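+// Editor's note (illustration only): with context ["servers", "alpha"] and
+// key "ip", the type is recorded under the full key "servers.alpha.ip",
+// mirroring Key.String().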
+func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. +func (p *parser) addImplicit(key Key) { + p.implicits[key.String()] = true +} + +// removeImplicit stops tagging the given key as having been implicitly +// created. +func (p *parser) removeImplicit(key Key) { + p.implicits[key.String()] = false +} + +// isImplicit returns true if the key group pointed to by the key was created +// implicitly. +func (p *parser) isImplicit(key Key) bool { + return p.implicits[key.String()] +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:] +} + +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } + } + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) 
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/src/dma/vendor/github.com/BurntSushi/toml/session.vim b/src/dma/vendor/github.com/BurntSushi/toml/session.vim new file mode 100644 index 00000000..562164be --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/session.vim @@ -0,0 +1 @@ +au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/src/dma/vendor/github.com/BurntSushi/toml/type_check.go b/src/dma/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 00000000..c73f8afc --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. 
+ if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/src/dma/vendor/github.com/BurntSushi/toml/type_fields.go b/src/dma/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 00000000..608997c2 --- /dev/null +++ b/src/dma/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
diff --git a/src/dma/vendor/github.com/go-redis/redis/.gitignore b/src/dma/vendor/github.com/go-redis/redis/.gitignore
new file mode 100644
index 00000000..ebfe903b
--- /dev/null
+++ b/src/dma/vendor/github.com/go-redis/redis/.gitignore
@@ -0,0 +1,2 @@
+*.rdb
+testdata/*/
diff --git a/src/dma/vendor/github.com/go-redis/redis/.travis.yml b/src/dma/vendor/github.com/go-redis/redis/.travis.yml
new file mode 100644
index 00000000..39ffc2be
--- /dev/null
+++ b/src/dma/vendor/github.com/go-redis/redis/.travis.yml
@@ -0,0 +1,20 @@
+sudo: false
+language: go
+
+services:
+  - redis-server
+
+go:
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+
+install:
+  - go get github.com/onsi/ginkgo
+  - go get github.com/onsi/gomega
diff --git a/src/dma/vendor/github.com/go-redis/redis/CHANGELOG.md b/src/dma/vendor/github.com/go-redis/redis/CHANGELOG.md
new file mode 100644
index 00000000..cb0e1b8e
--- /dev/null
+++ b/src/dma/vendor/github.com/go-redis/redis/CHANGELOG.md
@@ -0,0 +1,12 @@
+# Changelog
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better key distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout, so it does not lose data when a timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots`, which allows building a cluster of normal Redis servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/src/dma/vendor/github.com/go-redis/redis/LICENSE b/src/dma/vendor/github.com/go-redis/redis/LICENSE
new file mode 100644
index 00000000..298bed9b
--- /dev/null
+++ b/src/dma/vendor/github.com/go-redis/redis/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+  * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
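The v6.12 changelog entry above only points at the godoc example for `ClusterSlots`, so here is a minimal editorial sketch (not part of the vendored files) of how that option can point a `ClusterClient` at standalone Redis servers; the addresses and the two-way slot split are made-up placeholders.

```go
// Sketch only: illustrates the ClusterSlots option mentioned in the changelog.
// The addresses and slot split below are placeholders, not real servers.
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClusterClient(&redis.ClusterOptions{
		// Serve the full 0-16383 hash-slot range from two standalone servers
		// that do not have cluster mode enabled.
		ClusterSlots: func() ([]redis.ClusterSlot, error) {
			return []redis.ClusterSlot{
				{Start: 0, End: 8191, Nodes: []redis.ClusterNode{{Addr: ":7000"}}},
				{Start: 8192, End: 16383, Nodes: []redis.ClusterNode{{Addr: ":7001"}}},
			}, nil
		},
		RouteRandomly: true,
	})

	// ReloadState re-invokes the ClusterSlots callback on demand.
	if err := client.ReloadState(); err != nil {
		fmt.Println("reload state:", err)
	}

	fmt.Println(client.Ping().Result())
}
```

Because the slot layout comes from the callback rather than from a `CLUSTER SLOTS` query, `ReloadState` (see cluster.go below) simply calls the callback again to refresh the client's view of the shards.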
diff --git a/src/dma/vendor/github.com/go-redis/redis/Makefile b/src/dma/vendor/github.com/go-redis/redis/Makefile new file mode 100644 index 00000000..1fbdac91 --- /dev/null +++ b/src/dma/vendor/github.com/go-redis/redis/Makefile @@ -0,0 +1,20 @@ +all: testdeps + go test ./... + go test ./... -short -race + env GOOS=linux GOARCH=386 go test ./... + go vet + +testdeps: testdata/redis/src/redis-server + +bench: testdeps + go test ./... -test.run=NONE -test.bench=. -test.benchmem + +.PHONY: all test testdeps bench + +testdata/redis: + mkdir -p $@ + wget -qO- https://github.com/antirez/redis/archive/unstable.tar.gz | tar xvz --strip-components=1 -C $@ + +testdata/redis/src/redis-server: testdata/redis + sed -i.bak 's/libjemalloc.a/libjemalloc.a -lrt/g' $ +} + +func ExampleClient() { + err := client.Set("key", "value", 0).Err() + if err != nil { + panic(err) + } + + val, err := client.Get("key").Result() + if err != nil { + panic(err) + } + fmt.Println("key", val) + + val2, err := client.Get("key2").Result() + if err == redis.Nil { + fmt.Println("key2 does not exist") + } else if err != nil { + panic(err) + } else { + fmt.Println("key2", val2) + } + // Output: key value + // key2 does not exist +} +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package. + +## Look and feel + +Some corner cases: + +```go +// SET key value EX 10 NX +set, err := client.SetNX("key", "value", 10*time.Second).Result() + +// SORT list LIMIT 0 2 ASC +vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() + +// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 +vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{ + Min: "-inf", + Max: "+inf", + Offset: 0, + Count: 2, +}).Result() + +// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM +vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() + +// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" +vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() +``` + +## Benchmark + +go-redis vs redigo: + +``` +BenchmarkSetGoRedis10Conns64Bytes-4 200000 7621 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns64Bytes-4 200000 7554 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns1KB-4 200000 7697 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns1KB-4 200000 7688 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns10KB-4 200000 9214 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns10KB-4 200000 9181 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns1MB-4 2000 583242 ns/op 2337 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns1MB-4 2000 583089 ns/op 2338 B/op 6 allocs/op +BenchmarkSetRedigo10Conns64Bytes-4 200000 7576 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns64Bytes-4 200000 7782 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns1KB-4 200000 7958 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns1KB-4 200000 7725 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns10KB-4 100000 18442 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns10KB-4 100000 18818 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns1MB-4 2000 668829 ns/op 226 B/op 7 allocs/op +BenchmarkSetRedigo100Conns1MB-4 2000 679542 ns/op 226 B/op 7 allocs/op +``` + +Redis Cluster: + +``` +BenchmarkRedisPing-4 200000 6983 ns/op 116 B/op 4 allocs/op +BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op 4 allocs/op +``` + +## See also + +- [Golang 
PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang msgpack](https://github.com/vmihailenco/msgpack) +- [Golang message task queue](https://github.com/go-msgqueue/msgqueue) diff --git a/src/dma/vendor/github.com/go-redis/redis/cluster.go b/src/dma/vendor/github.com/go-redis/redis/cluster.go new file mode 100644 index 00000000..7a1af143 --- /dev/null +++ b/src/dma/vendor/github.com/go-redis/redis/cluster.go @@ -0,0 +1,1624 @@ +package redis + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "math" + "math/rand" + "net" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/go-redis/redis/internal" + "github.com/go-redis/redis/internal/hashtag" + "github.com/go-redis/redis/internal/pool" + "github.com/go-redis/redis/internal/proto" + "github.com/go-redis/redis/internal/singleflight" +) + +var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") + +// ClusterOptions are used to configure a cluster client and should be +// passed to NewClusterClient. +type ClusterOptions struct { + // A seed list of host:port addresses of cluster nodes. + Addrs []string + + // The maximum number of retries before giving up. Command is retried + // on network errors and MOVED/ASK redirects. + // Default is 8 retries. + MaxRedirects int + + // Enables read-only commands on slave nodes. + ReadOnly bool + // Allows routing read-only commands to the closest master or slave node. + // It automatically enables ReadOnly. + RouteByLatency bool + // Allows routing read-only commands to the random master or slave node. + // It automatically enables ReadOnly. + RouteRandomly bool + + // Optional function that returns cluster slots information. + // It is useful to manually create cluster of standalone Redis servers + // and load-balance read/write operations between master and slaves. + // It can use service like ZooKeeper to maintain configuration information + // and Cluster.ReloadState to manually trigger state reloading. + ClusterSlots func() ([]ClusterSlot, error) + + // Following options are copied from Options struct. + + OnConnect func(*Conn) error + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + Password string + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolSize applies per cluster node and not for the whole cluster. 
+ PoolSize int + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config +} + +func (opt *ClusterOptions) init() { + if opt.MaxRedirects == -1 { + opt.MaxRedirects = 0 + } else if opt.MaxRedirects == 0 { + opt.MaxRedirects = 8 + } + + if opt.RouteByLatency || opt.RouteRandomly { + opt.ReadOnly = true + } + + switch opt.ReadTimeout { + case -1: + opt.ReadTimeout = 0 + case 0: + opt.ReadTimeout = 3 * time.Second + } + switch opt.WriteTimeout { + case -1: + opt.WriteTimeout = 0 + case 0: + opt.WriteTimeout = opt.ReadTimeout + } + + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } +} + +func (opt *ClusterOptions) clientOptions() *Options { + const disableIdleCheck = -1 + + return &Options{ + OnConnect: opt.OnConnect, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + Password: opt.Password, + readOnly: opt.ReadOnly, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + + IdleCheckFrequency: disableIdleCheck, + + TLSConfig: opt.TLSConfig, + } +} + +//------------------------------------------------------------------------------ + +type clusterNode struct { + Client *Client + + latency uint32 // atomic + generation uint32 // atomic + loading uint32 // atomic +} + +func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { + opt := clOpt.clientOptions() + opt.Addr = addr + node := clusterNode{ + Client: NewClient(opt), + } + + node.latency = math.MaxUint32 + if clOpt.RouteByLatency { + go node.updateLatency() + } + + return &node +} + +func (n *clusterNode) String() string { + return n.Client.String() +} + +func (n *clusterNode) Close() error { + return n.Client.Close() +} + +func (n *clusterNode) updateLatency() { + const probes = 10 + + var latency uint32 + for i := 0; i < probes; i++ { + start := time.Now() + n.Client.Ping() + probe := uint32(time.Since(start) / time.Microsecond) + latency = (latency + probe) / 2 + } + atomic.StoreUint32(&n.latency, latency) +} + +func (n *clusterNode) Latency() time.Duration { + latency := atomic.LoadUint32(&n.latency) + return time.Duration(latency) * time.Microsecond +} + +func (n *clusterNode) MarkAsLoading() { + atomic.StoreUint32(&n.loading, uint32(time.Now().Unix())) +} + +func (n *clusterNode) Loading() bool { + const minute = int64(time.Minute / time.Second) + + loading := atomic.LoadUint32(&n.loading) + if loading == 0 { + return false + } + if time.Now().Unix()-int64(loading) < minute { + return true + } + atomic.StoreUint32(&n.loading, 0) + return false +} + +func (n *clusterNode) Generation() uint32 { + return atomic.LoadUint32(&n.generation) +} + +func (n *clusterNode) SetGeneration(gen uint32) { + for { + v := atomic.LoadUint32(&n.generation) + if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { + break + } + } +} + +//------------------------------------------------------------------------------ + +type clusterNodes struct { + opt *ClusterOptions + + mu sync.RWMutex + allAddrs []string + allNodes map[string]*clusterNode + clusterAddrs []string + closed bool + + nodeCreateGroup singleflight.Group + + _generation uint32 // atomic +} + +func newClusterNodes(opt 
*ClusterOptions) *clusterNodes { + return &clusterNodes{ + opt: opt, + + allAddrs: opt.Addrs, + allNodes: make(map[string]*clusterNode), + } +} + +func (c *clusterNodes) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, node := range c.allNodes { + if err := node.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + c.allNodes = nil + c.clusterAddrs = nil + + return firstErr +} + +func (c *clusterNodes) Addrs() ([]string, error) { + var addrs []string + c.mu.RLock() + closed := c.closed + if !closed { + if len(c.clusterAddrs) > 0 { + addrs = c.clusterAddrs + } else { + addrs = c.allAddrs + } + } + c.mu.RUnlock() + + if closed { + return nil, pool.ErrClosed + } + if len(addrs) == 0 { + return nil, errClusterNoNodes + } + return addrs, nil +} + +func (c *clusterNodes) NextGeneration() uint32 { + return atomic.AddUint32(&c._generation, 1) +} + +// GC removes unused nodes. +func (c *clusterNodes) GC(generation uint32) { + var collected []*clusterNode + c.mu.Lock() + for addr, node := range c.allNodes { + if node.Generation() >= generation { + continue + } + + c.clusterAddrs = remove(c.clusterAddrs, addr) + delete(c.allNodes, addr) + collected = append(collected, node) + } + c.mu.Unlock() + + for _, node := range collected { + _ = node.Client.Close() + } +} + +func (c *clusterNodes) Get(addr string) (*clusterNode, error) { + var node *clusterNode + var err error + c.mu.RLock() + if c.closed { + err = pool.ErrClosed + } else { + node = c.allNodes[addr] + } + c.mu.RUnlock() + return node, err +} + +func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { + node, err := c.Get(addr) + if err != nil { + return nil, err + } + if node != nil { + return node, nil + } + + v, err := c.nodeCreateGroup.Do(addr, func() (interface{}, error) { + node := newClusterNode(c.opt, addr) + return node, nil + }) + + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + node, ok := c.allNodes[addr] + if ok { + _ = v.(*clusterNode).Close() + return node, err + } + node = v.(*clusterNode) + + c.allAddrs = appendIfNotExists(c.allAddrs, addr) + if err == nil { + c.clusterAddrs = append(c.clusterAddrs, addr) + } + c.allNodes[addr] = node + + return node, err +} + +func (c *clusterNodes) All() ([]*clusterNode, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.closed { + return nil, pool.ErrClosed + } + + cp := make([]*clusterNode, 0, len(c.allNodes)) + for _, node := range c.allNodes { + cp = append(cp, node) + } + return cp, nil +} + +func (c *clusterNodes) Random() (*clusterNode, error) { + addrs, err := c.Addrs() + if err != nil { + return nil, err + } + + n := rand.Intn(len(addrs)) + return c.GetOrCreate(addrs[n]) +} + +//------------------------------------------------------------------------------ + +type clusterSlot struct { + start, end int + nodes []*clusterNode +} + +type clusterSlotSlice []*clusterSlot + +func (p clusterSlotSlice) Len() int { + return len(p) +} + +func (p clusterSlotSlice) Less(i, j int) bool { + return p[i].start < p[j].start +} + +func (p clusterSlotSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type clusterState struct { + nodes *clusterNodes + Masters []*clusterNode + Slaves []*clusterNode + + slots []*clusterSlot + + generation uint32 + createdAt time.Time +} + +func newClusterState( + nodes *clusterNodes, slots []ClusterSlot, origin string, +) (*clusterState, error) { + c := clusterState{ + nodes: nodes, + + slots: 
make([]*clusterSlot, 0, len(slots)), + + generation: nodes.NextGeneration(), + createdAt: time.Now(), + } + + isLoopbackOrigin := isLoopbackAddr(origin) + for _, slot := range slots { + var nodes []*clusterNode + for i, slotNode := range slot.Nodes { + addr := slotNode.Addr + if !isLoopbackOrigin && useOriginAddr(origin, addr) { + addr = origin + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return nil, err + } + + node.SetGeneration(c.generation) + nodes = append(nodes, node) + + if i == 0 { + c.Masters = appendUniqueNode(c.Masters, node) + } else { + c.Slaves = appendUniqueNode(c.Slaves, node) + } + } + + c.slots = append(c.slots, &clusterSlot{ + start: slot.Start, + end: slot.End, + nodes: nodes, + }) + } + + sort.Sort(clusterSlotSlice(c.slots)) + + time.AfterFunc(time.Minute, func() { + nodes.GC(c.generation) + }) + + return &c, nil +} + +func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + switch len(nodes) { + case 0: + return c.nodes.Random() + case 1: + return nodes[0], nil + case 2: + if slave := nodes[1]; !slave.Loading() { + return slave, nil + } + return nodes[0], nil + default: + var slave *clusterNode + for i := 0; i < 10; i++ { + n := rand.Intn(len(nodes)-1) + 1 + slave = nodes[n] + if !slave.Loading() { + break + } + } + return slave, nil + } +} + +func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { + const threshold = time.Millisecond + + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + var node *clusterNode + for _, n := range nodes { + if n.Loading() { + continue + } + if node == nil || node.Latency()-n.Latency() > threshold { + node = n + } + } + return node, nil +} + +func (c *clusterState) slotRandomNode(slot int) *clusterNode { + nodes := c.slotNodes(slot) + n := rand.Intn(len(nodes)) + return nodes[n] +} + +func (c *clusterState) slotNodes(slot int) []*clusterNode { + i := sort.Search(len(c.slots), func(i int) bool { + return c.slots[i].end >= slot + }) + if i >= len(c.slots) { + return nil + } + x := c.slots[i] + if slot >= x.start && slot <= x.end { + return x.nodes + } + return nil +} + +func (c *clusterState) IsConsistent() bool { + if c.nodes.opt.ClusterSlots != nil { + return true + } + return len(c.Masters) <= len(c.Slaves) +} + +//------------------------------------------------------------------------------ + +type clusterStateHolder struct { + load func() (*clusterState, error) + + state atomic.Value + + firstErrMu sync.RWMutex + firstErr error + + reloading uint32 // atomic +} + +func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder { + return &clusterStateHolder{ + load: fn, + } +} + +func (c *clusterStateHolder) Reload() (*clusterState, error) { + state, err := c.reload() + if err != nil { + return nil, err + } + if !state.IsConsistent() { + time.AfterFunc(time.Second, c.LazyReload) + } + return state, nil +} + +func (c *clusterStateHolder) reload() (*clusterState, error) { + state, err := c.load() + if err != nil { + c.firstErrMu.Lock() + if c.firstErr == nil { + c.firstErr = err + } + c.firstErrMu.Unlock() + return nil, err + } + c.state.Store(state) + return state, nil +} + +func (c *clusterStateHolder) LazyReload() { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go func() { + defer 
atomic.StoreUint32(&c.reloading, 0) + + for { + state, err := c.reload() + if err != nil { + return + } + time.Sleep(100 * time.Millisecond) + if state.IsConsistent() { + return + } + } + }() +} + +func (c *clusterStateHolder) Get() (*clusterState, error) { + v := c.state.Load() + if v != nil { + state := v.(*clusterState) + if time.Since(state.createdAt) > time.Minute { + c.LazyReload() + } + return state, nil + } + + c.firstErrMu.RLock() + err := c.firstErr + c.firstErrMu.RUnlock() + if err != nil { + return nil, err + } + + return nil, errors.New("redis: cluster has no state") +} + +func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) { + state, err := c.Reload() + if err == nil { + return state, nil + } + return c.Get() +} + +//------------------------------------------------------------------------------ + +// ClusterClient is a Redis Cluster client representing a pool of zero +// or more underlying connections. It's safe for concurrent use by +// multiple goroutines. +type ClusterClient struct { + cmdable + + ctx context.Context + + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder + cmdsInfoCache *cmdsInfoCache + + process func(Cmder) error + processPipeline func([]Cmder) error + processTxPipeline func([]Cmder) error +} + +// NewClusterClient returns a Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. +func NewClusterClient(opt *ClusterOptions) *ClusterClient { + opt.init() + + c := &ClusterClient{ + opt: opt, + nodes: newClusterNodes(opt), + } + c.state = newClusterStateHolder(c.loadState) + c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) + + c.process = c.defaultProcess + c.processPipeline = c.defaultProcessPipeline + c.processTxPipeline = c.defaultProcessTxPipeline + + c.init() + + _, _ = c.state.Reload() + _, _ = c.cmdsInfoCache.Get() + + if opt.IdleCheckFrequency > 0 { + go c.reaper(opt.IdleCheckFrequency) + } + + return c +} + +// ReloadState reloads cluster state. It calls ClusterSlots func +// to get cluster slots information. +func (c *ClusterClient) ReloadState() error { + _, err := c.state.Reload() + return err +} + +func (c *ClusterClient) init() { + c.cmdable.setProcessor(c.Process) +} + +func (c *ClusterClient) Context() context.Context { + if c.ctx != nil { + return c.ctx + } + return context.Background() +} + +func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { + if ctx == nil { + panic("nil context") + } + c2 := c.copy() + c2.ctx = ctx + return c2 +} + +func (c *ClusterClient) copy() *ClusterClient { + cp := *c + cp.init() + return &cp +} + +// Options returns read-only Options that were used to create the client. 
+func (c *ClusterClient) Options() *ClusterOptions { + return c.opt +} + +func (c *ClusterClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) { + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + for _, addr := range addrs { + node, err := c.nodes.Get(addr) + if err != nil { + return nil, err + } + if node == nil { + continue + } + + info, err := node.Client.Command().Result() + if err == nil { + return info, nil + } + if firstErr == nil { + firstErr = err + } + } + return nil, firstErr +} + +func (c *ClusterClient) cmdInfo(name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get() + if err != nil { + return nil + } + + info := cmdsInfo[name] + if info == nil { + internal.Logf("info for cmd=%s not found", name) + } + return info +} + +func cmdSlot(cmd Cmder, pos int) int { + if pos == 0 { + return hashtag.RandomSlot() + } + firstKey := cmd.stringArg(pos) + return hashtag.Slot(firstKey) +} + +func (c *ClusterClient) cmdSlot(cmd Cmder) int { + cmdInfo := c.cmdInfo(cmd.Name()) + return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) +} + +func (c *ClusterClient) cmdSlotAndNode(cmd Cmder) (int, *clusterNode, error) { + state, err := c.state.Get() + if err != nil { + return 0, nil, err + } + + cmdInfo := c.cmdInfo(cmd.Name()) + slot := cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) + + if cmdInfo != nil && cmdInfo.ReadOnly && c.opt.ReadOnly { + if c.opt.RouteByLatency { + node, err := state.slotClosestNode(slot) + return slot, node, err + } + + if c.opt.RouteRandomly { + node := state.slotRandomNode(slot) + return slot, node, nil + } + + node, err := state.slotSlaveNode(slot) + return slot, node, err + } + + node, err := state.slotMasterNode(slot) + return slot, node, err +} + +func (c *ClusterClient) slotMasterNode(slot int) (*clusterNode, error) { + state, err := c.state.Get() + if err != nil { + return nil, err + } + + nodes := state.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + slot := hashtag.Slot(keys[0]) + for _, key := range keys[1:] { + if hashtag.Slot(key) != slot { + err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") + return err + } + } + + node, err := c.slotMasterNode(slot) + if err != nil { + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + err = node.Client.Watch(fn, keys...) + if err == nil { + break + } + + if internal.IsRetryableError(err, true) { + c.state.LazyReload() + continue + } + + moved, ask, addr := internal.IsMovedError(err) + if moved || ask { + c.state.LazyReload() + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if err == pool.ErrClosed { + node, err = c.slotMasterNode(slot) + if err != nil { + return err + } + continue + } + + return err + } + + return err +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a ClusterClient, as the ClusterClient is meant +// to be long-lived and shared between many goroutines. 
+func (c *ClusterClient) Close() error { + return c.nodes.Close() +} + +func (c *ClusterClient) WrapProcess( + fn func(oldProcess func(Cmder) error) func(Cmder) error, +) { + c.process = fn(c.process) +} + +func (c *ClusterClient) Process(cmd Cmder) error { + return c.process(cmd) +} + +func (c *ClusterClient) defaultProcess(cmd Cmder) error { + var node *clusterNode + var ask bool + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + if node == nil { + var err error + _, node, err = c.cmdSlotAndNode(cmd) + if err != nil { + cmd.setErr(err) + break + } + } + + var err error + if ask { + pipe := node.Client.Pipeline() + _ = pipe.Process(NewCmd("ASKING")) + _ = pipe.Process(cmd) + _, err = pipe.Exec() + _ = pipe.Close() + ask = false + } else { + err = node.Client.Process(cmd) + } + + // If there is no error - we are done. + if err == nil { + break + } + + // If slave is loading - read from master. + if c.opt.ReadOnly && internal.IsLoadingError(err) { + node.MarkAsLoading() + continue + } + + if internal.IsRetryableError(err, true) { + c.state.LazyReload() + + // First retry the same node. + if attempt == 0 { + continue + } + + // Second try random node. + node, err = c.nodes.Random() + if err != nil { + break + } + continue + } + + var moved bool + var addr string + moved, ask, addr = internal.IsMovedError(err) + if moved || ask { + c.state.LazyReload() + + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + break + } + continue + } + + if err == pool.ErrClosed { + node = nil + continue + } + + break + } + + return cmd.Err() +} + +// ForEachMaster concurrently calls the fn on each master node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, master := range state.Masters { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(master) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachSlave concurrently calls the fn on each slave node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, slave := range state.Slaves { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(slave) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachNode concurrently calls the fn on each known node in the cluster. +// It returns the first error if any. 
+func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + worker := func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + } + + for _, node := range state.Masters { + wg.Add(1) + go worker(node) + } + for _, node := range state.Slaves { + wg.Add(1) + go worker(node) + } + + wg.Wait() + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// PoolStats returns accumulated connection pool stats. +func (c *ClusterClient) PoolStats() *PoolStats { + var acc PoolStats + + state, _ := c.state.Get() + if state == nil { + return &acc + } + + for _, node := range state.Masters { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.FreeConns += s.FreeConns + acc.StaleConns += s.StaleConns + } + + for _, node := range state.Slaves { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.FreeConns += s.FreeConns + acc.StaleConns += s.StaleConns + } + + return &acc +} + +func (c *ClusterClient) loadState() (*clusterState, error) { + if c.opt.ClusterSlots != nil { + slots, err := c.opt.ClusterSlots() + if err != nil { + return nil, err + } + return newClusterState(c.nodes, slots, "") + } + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + for _, addr := range addrs { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + slots, err := node.Client.ClusterSlots().Result() + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + return newClusterState(c.nodes, slots, node.Client.opt.Addr) + } + + return nil, firstErr +} + +// reaper closes idle connections to the cluster. 
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { + ticker := time.NewTicker(idleCheckFrequency) + defer ticker.Stop() + + for range ticker.C { + nodes, err := c.nodes.All() + if err != nil { + break + } + + for _, node := range nodes { + _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() + if err != nil { + internal.Logf("ReapStaleConns failed: %s", err) + } + } + } +} + +func (c *ClusterClient) Pipeline() Pipeliner { + pipe := Pipeline{ + exec: c.processPipeline, + } + pipe.statefulCmdable.setProcessor(pipe.Process) + return &pipe +} + +func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(fn) +} + +func (c *ClusterClient) WrapProcessPipeline( + fn func(oldProcess func([]Cmder) error) func([]Cmder) error, +) { + c.processPipeline = fn(c.processPipeline) +} + +func (c *ClusterClient) defaultProcessPipeline(cmds []Cmder) error { + cmdsMap, err := c.mapCmdsByNode(cmds) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + failedCmds := make(map[*clusterNode][]Cmder) + + for node, cmds := range cmdsMap { + cn, err := node.Client.getConn() + if err != nil { + if err == pool.ErrClosed { + c.remapCmds(cmds, failedCmds) + } else { + setCmdsErr(cmds, err) + } + continue + } + + err = c.pipelineProcessCmds(node, cn, cmds, failedCmds) + if err == nil || internal.IsRedisError(err) { + node.Client.connPool.Put(cn) + } else { + node.Client.connPool.Remove(cn) + } + } + + if len(failedCmds) == 0 { + break + } + cmdsMap = failedCmds + } + + return firstCmdsErr(cmds) +} + +func (c *ClusterClient) mapCmdsByNode(cmds []Cmder) (map[*clusterNode][]Cmder, error) { + state, err := c.state.Get() + if err != nil { + setCmdsErr(cmds, err) + return nil, err + } + + cmdsMap := make(map[*clusterNode][]Cmder) + cmdsAreReadOnly := c.cmdsAreReadOnly(cmds) + for _, cmd := range cmds { + var node *clusterNode + var err error + if cmdsAreReadOnly { + _, node, err = c.cmdSlotAndNode(cmd) + } else { + slot := c.cmdSlot(cmd) + node, err = state.slotMasterNode(slot) + } + if err != nil { + return nil, err + } + cmdsMap[node] = append(cmdsMap[node], cmd) + } + return cmdsMap, nil +} + +func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(cmd.Name()) + if cmdInfo == nil || !cmdInfo.ReadOnly { + return false + } + } + return true +} + +func (c *ClusterClient) remapCmds(cmds []Cmder, failedCmds map[*clusterNode][]Cmder) { + remappedCmds, err := c.mapCmdsByNode(cmds) + if err != nil { + setCmdsErr(cmds, err) + return + } + + for node, cmds := range remappedCmds { + failedCmds[node] = cmds + } +} + +func (c *ClusterClient) pipelineProcessCmds( + node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, +) error { + cn.SetWriteTimeout(c.opt.WriteTimeout) + + err := writeCmd(cn, cmds...) + if err != nil { + setCmdsErr(cmds, err) + failedCmds[node] = cmds + return err + } + + // Set read timeout for all commands. 
+ cn.SetReadTimeout(c.opt.ReadTimeout) + + return c.pipelineReadCmds(cn, cmds, failedCmds) +} + +func (c *ClusterClient) pipelineReadCmds( + cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, +) error { + for _, cmd := range cmds { + err := cmd.readReply(cn) + if err == nil { + continue + } + + if c.checkMovedErr(cmd, err, failedCmds) { + continue + } + + if internal.IsRedisError(err) { + continue + } + + return err + } + return nil +} + +func (c *ClusterClient) checkMovedErr( + cmd Cmder, err error, failedCmds map[*clusterNode][]Cmder, +) bool { + moved, ask, addr := internal.IsMovedError(err) + + if moved { + c.state.LazyReload() + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + failedCmds[node] = append(failedCmds[node], cmd) + return true + } + + if ask { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + failedCmds[node] = append(failedCmds[node], NewCmd("ASKING"), cmd) + return true + } + + return false +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *ClusterClient) TxPipeline() Pipeliner { + pipe := Pipeline{ + exec: c.processTxPipeline, + } + pipe.statefulCmdable.setProcessor(pipe.Process) + return &pipe +} + +func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(fn) +} + +func (c *ClusterClient) defaultProcessTxPipeline(cmds []Cmder) error { + state, err := c.state.Get() + if err != nil { + return err + } + + cmdsMap := c.mapCmdsBySlot(cmds) + for slot, cmds := range cmdsMap { + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + continue + } + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + failedCmds := make(map[*clusterNode][]Cmder) + + for node, cmds := range cmdsMap { + cn, err := node.Client.getConn() + if err != nil { + if err == pool.ErrClosed { + c.remapCmds(cmds, failedCmds) + } else { + setCmdsErr(cmds, err) + } + continue + } + + err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds) + if err == nil || internal.IsRedisError(err) { + node.Client.connPool.Put(cn) + } else { + node.Client.connPool.Remove(cn) + } + } + + if len(failedCmds) == 0 { + break + } + cmdsMap = failedCmds + } + } + + return firstCmdsErr(cmds) +} + +func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { + cmdsMap := make(map[int][]Cmder) + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + cmdsMap[slot] = append(cmdsMap[slot], cmd) + } + return cmdsMap +} + +func (c *ClusterClient) txPipelineProcessCmds( + node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, +) error { + cn.SetWriteTimeout(c.opt.WriteTimeout) + if err := txPipelineWriteMulti(cn, cmds); err != nil { + setCmdsErr(cmds, err) + failedCmds[node] = cmds + return err + } + + // Set read timeout for all commands. + cn.SetReadTimeout(c.opt.ReadTimeout) + + if err := c.txPipelineReadQueued(cn, cmds, failedCmds); err != nil { + setCmdsErr(cmds, err) + return err + } + + return pipelineReadCmds(cn, cmds) +} + +func (c *ClusterClient) txPipelineReadQueued( + cn *pool.Conn, cmds []Cmder, failedCmds map[*clusterNode][]Cmder, +) error { + // Parse queued replies. 
+ var statusCmd StatusCmd + if err := statusCmd.readReply(cn); err != nil { + return err + } + + for _, cmd := range cmds { + err := statusCmd.readReply(cn) + if err == nil { + continue + } + + if c.checkMovedErr(cmd, err, failedCmds) || internal.IsRedisError(err) { + continue + } + + return err + } + + // Parse number of replies. + line, err := cn.Rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + err := proto.ParseErrorReply(line) + for _, cmd := range cmds { + if !c.checkMovedErr(cmd, err, failedCmds) { + break + } + } + return err + case proto.ArrayReply: + // ok + default: + err := fmt.Errorf("redis: expected '*', but got line %q", line) + return err + } + + return nil +} + +func (c *ClusterClient) pubSub(channels []string) *PubSub { + var node *clusterNode + pubsub := &PubSub{ + opt: c.opt.clientOptions(), + + newConn: func(channels []string) (*pool.Conn, error) { + if node == nil { + var slot int + if len(channels) > 0 { + slot = hashtag.Slot(channels[0]) + } else { + slot = -1 + } + + masterNode, err := c.slotMasterNode(slot) + if err != nil { + return nil, err + } + node = masterNode + } + return node.Client.newConn() + }, + closeConn: func(cn *pool.Conn) error { + return node.Client.connPool.CloseConn(cn) + }, + } + pubsub.init() + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +func (c *ClusterClient) Subscribe(channels ...string) *PubSub { + pubsub := c.pubSub(channels) + if len(channels) > 0 { + _ = pubsub.Subscribe(channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *ClusterClient) PSubscribe(channels ...string) *PubSub { + pubsub := c.pubSub(channels) + if len(channels) > 0 { + _ = pubsub.PSubscribe(channels...) + } + return pubsub +} + +func useOriginAddr(originAddr, nodeAddr string) bool { + nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) + if err != nil { + return false + } + + nodeIP := net.ParseIP(nodeHost) + if nodeIP == nil { + return false + } + + if !nodeIP.IsLoopback() { + return false + } + + _, originPort, err := net.SplitHostPort(originAddr) + if err != nil { + return false + } + + return nodePort == originPort +} + +func isLoopbackAddr(addr string) bool { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return false + } + + ip := net.ParseIP(host) + if ip == nil { + return false + } + + return ip.IsLoopback() +} + +func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { + for _, n := range nodes { + if n == node { + return nodes + } + } + return append(nodes, node) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} + +func remove(ss []string, es ...string) []string { + if len(es) == 0 { + return ss[:0] + } + for _, e := range es { + for i, s := range ss { + if s == e { + ss = append(ss[:i], ss[i+1:]...) 
+ break + } + } + } + return ss +} diff --git a/src/dma/vendor/github.com/go-redis/redis/cluster_commands.go b/src/dma/vendor/github.com/go-redis/redis/cluster_commands.go new file mode 100644 index 00000000..dff62c90 --- /dev/null +++ b/src/dma/vendor/github.com/go-redis/redis/cluster_commands.go @@ -0,0 +1,22 @@ +package redis + +import "sync/atomic" + +func (c *ClusterClient) DBSize() *IntCmd { + cmd := NewIntCmd("dbsize") + var size int64 + err := c.ForEachMaster(func(master *Client) error { + n, err := master.DBSize().Result() + if err != nil { + return err + } + atomic.AddInt64(&size, n) + return nil + }) + if err != nil { + cmd.setErr(err) + return cmd + } + cmd.val = size + return cmd +} diff --git a/src/dma/vendor/github.com/go-redis/redis/command.go b/src/dma/vendor/github.com/go-redis/redis/command.go new file mode 100644 index 00000000..11472bec --- /dev/null +++ b/src/dma/vendor/github.com/go-redis/redis/command.go @@ -0,0 +1,1225 @@ +package redis + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" + + "github.com/go-redis/redis/internal" + "github.com/go-redis/redis/internal/pool" + "github.com/go-redis/redis/internal/proto" + "github.com/go-redis/redis/internal/util" +) + +type Cmder interface { + Name() string + Args() []interface{} + stringArg(int) string + + readReply(*pool.Conn) error + setErr(error) + + readTimeout() *time.Duration + + Err() error + fmt.Stringer +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.setErr(e) + } + } +} + +func firstCmdsErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmd(cn *pool.Conn, cmds ...Cmder) error { + cn.Wb.Reset() + for _, cmd := range cmds { + if err := cn.Wb.Append(cmd.Args()); err != nil { + return err + } + } + + _, err := cn.Write(cn.Wb.Bytes()) + return err +} + +func cmdString(cmd Cmder, val interface{}) string { + var ss []string + for _, arg := range cmd.Args() { + ss = append(ss, fmt.Sprint(arg)) + } + s := strings.Join(ss, " ") + if err := cmd.Err(); err != nil { + return s + ": " + err.Error() + } + if val != nil { + switch vv := val.(type) { + case []byte: + return s + ": " + string(vv) + default: + return s + ": " + fmt.Sprint(val) + } + } + return s + +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + switch cmd.Name() { + case "eval", "evalsha": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + } + if info == nil { + return 0 + } + return int(info.FirstKeyPos) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + _args []interface{} + err error + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd._args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd._args) { + return "" + } + s, _ := cmd._args[pos].(string) + return s +} + +func (cmd *baseCmd) Name() string { + if len(cmd._args) > 0 { + // Cmd name must be lower cased. 
+ s := internal.ToLower(cmd.stringArg(0)) + cmd._args[0] = s + return s + } + return "" +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +func (cmd *baseCmd) setErr(e error) { + cmd.err = e +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) readReply(cn *pool.Conn) error { + cmd.val, cmd.err = cn.Rd.ReadReply(sliceParser) + if cmd.err != nil { + return cmd.err + } + if b, ok := cmd.val.([]byte); ok { + // Bytes must be copied, because underlying memory is reused. + cmd.val = string(b) + } + return nil +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SliceCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(sliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(cn *pool.Conn) error { + cmd.val, cmd.err = cn.Rd.ReadStringReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(cn *pool.Conn) error { + cmd.val, cmd.err = cn.Rd.ReadIntReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{_args: args}, + precision: precision, + } +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, 
cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(cn *pool.Conn) error { + var n int64 + n, cmd.err = cn.Rd.ReadIntReply() + if cmd.err != nil { + return cmd.err + } + cmd.val = time.Duration(n) * cmd.precision + return nil +} + +//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(timeParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(time.Time) + return nil +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +var ok = []byte("OK") + +func (cmd *BoolCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadReply(nil) + // `SET key value NX` returns nil when key already exists. But + // `SETNX key value` returns bool (0/1). So convert nil to bool. + // TODO: is this okay? 
+ if cmd.err == Nil { + cmd.val = false + cmd.err = nil + return nil + } + if cmd.err != nil { + return cmd.err + } + switch v := v.(type) { + case int64: + cmd.val = v == 1 + return nil + case []byte: + cmd.val = bytes.Equal(v, ok) + return nil + default: + cmd.err = fmt.Errorf("got %T, wanted int64 or string", v) + return cmd.err + } +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val []byte +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringCmd) Val() string { + return util.BytesToString(cmd.val) +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return cmd.val, cmd.err +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan(cmd.val, val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(cn *pool.Conn) error { + cmd.val, cmd.err = cn.Rd.ReadBytesReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(args ...interface{}) *FloatCmd { + return &FloatCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(cn *pool.Conn) error { + cmd.val, cmd.err = cn.Rd.ReadFloatReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(stringSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]string) + return nil +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + 
+func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(boolSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]bool) + return nil +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*StringStringMapCmd)(nil) + +func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStringMapCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(stringStringMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]string) + return nil +} + +//------------------------------------------------------------------------------ + +type StringIntMapCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*StringIntMapCmd)(nil) + +func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd { + return &StringIntMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringIntMapCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *StringIntMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringIntMapCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(stringIntMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]int64) + return nil +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(stringStructMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]struct{}) + return nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []*XMessage +} + +type XMessage struct { + ID string + Values map[string]interface{} +} + +//------------------------------------------------------------------------------ + +type XStreamSliceCmd struct { + baseCmd + + val []*XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XStreamSliceCmd) Val() []*XStream { + return cmd.val +} + +func (cmd 
*XStreamSliceCmd) Result() ([]*XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(xStreamSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]*XStream) + return nil +} + +// Implements proto.MultiBulkParse +func xStreamSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + xx := make([]*XStream, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadArrayReply(xStreamParser) + if err != nil { + return nil, err + } + xx[i] = v.(*XStream) + } + return xx, nil +} + +// Implements proto.MultiBulkParse +func xStreamParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + stream, err := rd.ReadStringReply() + if err != nil { + return nil, err + } + + v, err := rd.ReadArrayReply(xMessageSliceParser) + if err != nil { + return nil, err + } + + return &XStream{ + Stream: stream, + Messages: v.([]*XMessage), + }, nil +} + +//------------------------------------------------------------------------------ + +type XMessageSliceCmd struct { + baseCmd + + val []*XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(args ...interface{}) *XMessageSliceCmd { + return &XMessageSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XMessageSliceCmd) Val() []*XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]*XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(xMessageSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]*XMessage) + return nil +} + +// Implements proto.MultiBulkParse +func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + msgs := make([]*XMessage, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadArrayReply(xMessageParser) + if err != nil { + return nil, err + } + msgs[i] = v.(*XMessage) + } + return msgs, nil +} + +// Implements proto.MultiBulkParse +func xMessageParser(rd *proto.Reader, n int64) (interface{}, error) { + id, err := rd.ReadStringReply() + if err != nil { + return nil, err + } + + v, err := rd.ReadArrayReply(xKeyValueParser) + if err != nil { + return nil, err + } + + return &XMessage{ + ID: id, + Values: v.(map[string]interface{}), + }, nil +} + +// Implements proto.MultiBulkParse +func xKeyValueParser(rd *proto.Reader, n int64) (interface{}, error) { + values := make(map[string]interface{}, n) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadStringReply() + if err != nil { + return nil, err + } + + value, err := rd.ReadStringReply() + if err != nil { + return nil, err + } + + values[key] = value + } + return values, nil +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(cn *pool.Conn) 
error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(zSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]Z) + return nil +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + page []string + cursor uint64 + + process func(cmd Cmder) error +} + +var _ Cmder = (*ScanCmd)(nil) + +func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd { + return &ScanCmd{ + baseCmd: baseCmd{_args: args}, + process: process, + } +} + +func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { + return cmd.page, cmd.cursor +} + +func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { + return cmd.page, cmd.cursor, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.page) +} + +func (cmd *ScanCmd) readReply(cn *pool.Conn) error { + cmd.page, cmd.cursor, cmd.err = cn.Rd.ReadScanReply() + return cmd.err +} + +// Iterator creates a new ScanIterator. +func (cmd *ScanCmd) Iterator() *ScanIterator { + return &ScanIterator{ + cmd: cmd, + } +} + +//------------------------------------------------------------------------------ + +type ClusterNode struct { + Id string + Addr string +} + +type ClusterSlot struct { + Start int + End int + Nodes []ClusterNode +} + +type ClusterSlotsCmd struct { + baseCmd + + val []ClusterSlot +} + +var _ Cmder = (*ClusterSlotsCmd)(nil) + +func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd { + return &ClusterSlotsCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { + return cmd.val +} + +func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotsCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(clusterSlotsParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]ClusterSlot) + return nil +} + +//------------------------------------------------------------------------------ + +// GeoLocation is used with GeoAdd to add geospatial location. +type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 +} + +// GeoRadiusQuery is used with GeoRadius to query geospatial index. +type GeoRadiusQuery struct { + Radius float64 + // Can be m, km, ft, or mi. Default is km. + Unit string + WithCoord bool + WithDist bool + WithGeoHash bool + Count int + // Can be ASC or DESC. Default is no sort order. 
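+ // ASC sorts results from nearest to farthest relative to the query
+ // point; DESC sorts from farthest to nearest.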
+ Sort string + Store string + StoreDist string +} + +type GeoLocationCmd struct { + baseCmd + + q *GeoRadiusQuery + locations []GeoLocation +} + +var _ Cmder = (*GeoLocationCmd)(nil) + +func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { + args = append(args, q.Radius) + if q.Unit != "" { + args = append(args, q.Unit) + } else { + args = append(args, "km") + } + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithGeoHash { + args = append(args, "withhash") + } + if q.Count > 0 { + args = append(args, "count", q.Count) + } + if q.Sort != "" { + args = append(args, q.Sort) + } + if q.Store != "" { + args = append(args, "store") + args = append(args, q.Store) + } + if q.StoreDist != "" { + args = append(args, "storedist") + args = append(args, q.StoreDist) + } + return &GeoLocationCmd{ + baseCmd: baseCmd{_args: args}, + q: q, + } +} + +func (cmd *GeoLocationCmd) Val() []GeoLocation { + return cmd.locations +} + +func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { + return cmd.locations, cmd.err +} + +func (cmd *GeoLocationCmd) String() string { + return cmdString(cmd, cmd.locations) +} + +func (cmd *GeoLocationCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) + if cmd.err != nil { + return cmd.err + } + cmd.locations = v.([]GeoLocation) + return nil +} + +//------------------------------------------------------------------------------ + +type GeoPos struct { + Longitude, Latitude float64 +} + +type GeoPosCmd struct { + baseCmd + + positions []*GeoPos +} + +var _ Cmder = (*GeoPosCmd)(nil) + +func NewGeoPosCmd(args ...interface{}) *GeoPosCmd { + return &GeoPosCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *GeoPosCmd) Val() []*GeoPos { + return cmd.positions +} + +func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *GeoPosCmd) String() string { + return cmdString(cmd, cmd.positions) +} + +func (cmd *GeoPosCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(geoPosSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.positions = v.([]*GeoPos) + return nil +} + +//------------------------------------------------------------------------------ + +type CommandInfo struct { + Name string + Arity int8 + Flags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool +} + +type CommandsInfoCmd struct { + baseCmd + + val map[string]*CommandInfo +} + +var _ Cmder = (*CommandsInfoCmd)(nil) + +func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd { + return &CommandsInfoCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { + return cmd.val +} + +func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *CommandsInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CommandsInfoCmd) readReply(cn *pool.Conn) error { + var v interface{} + v, cmd.err = cn.Rd.ReadArrayReply(commandInfoSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]*CommandInfo) + return nil +} + +//------------------------------------------------------------------------------ + +type cmdsInfoCache struct { + fn func() (map[string]*CommandInfo, error) + + once internal.Once + cmds map[string]*CommandInfo +} + +func newCmdsInfoCache(fn func() 
(map[string]*CommandInfo, error)) *cmdsInfoCache { + return &cmdsInfoCache{ + fn: fn, + } +} + +func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) { + err := c.once.Do(func() error { + cmds, err := c.fn() + if err != nil { + return err + } + c.cmds = cmds + return nil + }) + return c.cmds, err +} diff --git a/src/dma/vendor/github.com/go-redis/redis/commands.go b/src/dma/vendor/github.com/go-redis/redis/commands.go new file mode 100644 index 00000000..dddf8acd --- /dev/null +++ b/src/dma/vendor/github.com/go-redis/redis/commands.go @@ -0,0 +1,2337 @@ +package redis + +import ( + "errors" + "io" + "time" + + "github.com/go-redis/redis/internal" +) + +func readTimeout(timeout time.Duration) time.Duration { + if timeout == 0 { + return 0 + } + return timeout + 10*time.Second +} + +func usePrecise(dur time.Duration) bool { + return dur < time.Second || dur%time.Second != 0 +} + +func formatMs(dur time.Duration) int64 { + if dur > 0 && dur < time.Millisecond { + internal.Logf( + "specified duration is %s, but minimal supported value is %s", + dur, time.Millisecond, + ) + } + return int64(dur / time.Millisecond) +} + +func formatSec(dur time.Duration) int64 { + if dur > 0 && dur < time.Second { + internal.Logf( + "specified duration is %s, but minimal supported value is %s", + dur, time.Second, + ) + } + return int64(dur / time.Second) +} + +func appendArgs(dst, src []interface{}) []interface{} { + if len(src) == 1 { + if ss, ok := src[0].([]string); ok { + for _, s := range ss { + dst = append(dst, s) + } + return dst + } + } + + for _, v := range src { + dst = append(dst, v) + } + return dst +} + +type Cmdable interface { + Pipeline() Pipeliner + Pipelined(fn func(Pipeliner) error) ([]Cmder, error) + + TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) + TxPipeline() Pipeliner + + Command() *CommandsInfoCmd + ClientGetName() *StringCmd + Echo(message interface{}) *StringCmd + Ping() *StatusCmd + Quit() *StatusCmd + Del(keys ...string) *IntCmd + Unlink(keys ...string) *IntCmd + Dump(key string) *StringCmd + Exists(keys ...string) *IntCmd + Expire(key string, expiration time.Duration) *BoolCmd + ExpireAt(key string, tm time.Time) *BoolCmd + Keys(pattern string) *StringSliceCmd + Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd + Move(key string, db int64) *BoolCmd + ObjectRefCount(key string) *IntCmd + ObjectEncoding(key string) *StringCmd + ObjectIdleTime(key string) *DurationCmd + Persist(key string) *BoolCmd + PExpire(key string, expiration time.Duration) *BoolCmd + PExpireAt(key string, tm time.Time) *BoolCmd + PTTL(key string) *DurationCmd + RandomKey() *StringCmd + Rename(key, newkey string) *StatusCmd + RenameNX(key, newkey string) *BoolCmd + Restore(key string, ttl time.Duration, value string) *StatusCmd + RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd + Sort(key string, sort *Sort) *StringSliceCmd + SortStore(key, store string, sort *Sort) *IntCmd + SortInterfaces(key string, sort *Sort) *SliceCmd + Touch(keys ...string) *IntCmd + TTL(key string) *DurationCmd + Type(key string) *StatusCmd + Scan(cursor uint64, match string, count int64) *ScanCmd + SScan(key string, cursor uint64, match string, count int64) *ScanCmd + HScan(key string, cursor uint64, match string, count int64) *ScanCmd + ZScan(key string, cursor uint64, match string, count int64) *ScanCmd + Append(key, value string) *IntCmd + BitCount(key string, bitCount *BitCount) *IntCmd + BitOpAnd(destKey string, keys ...string) *IntCmd + BitOpOr(destKey string, keys 
...string) *IntCmd + BitOpXor(destKey string, keys ...string) *IntCmd + BitOpNot(destKey string, key string) *IntCmd + BitPos(key string, bit int64, pos ...int64) *IntCmd + Decr(key string) *IntCmd + DecrBy(key string, decrement int64) *IntCmd + Get(key string) *StringCmd + GetBit(key string, offset int64) *IntCmd + GetRange(key string, start, end int64) *StringCmd + GetSet(key string, value interface{}) *StringCmd + Incr(key string) *IntCmd + IncrBy(key string, value int64) *IntCmd + IncrByFloat(key string, value float64) *FloatCmd + MGet(keys ...string) *SliceCmd + MSet(pairs ...interface{}) *StatusCmd + MSetNX(pairs ...interface{}) *BoolCmd + Set(key string, value interface{}, expiration time.Duration) *StatusCmd + SetBit(key string, offset int64, value int) *IntCmd + SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd + SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd + SetRange(key string, offset int64, value string) *IntCmd + StrLen(key string) *IntCmd + HDel(key string, fields ...string) *IntCmd + HExists(key, field string) *BoolCmd + HGet(key, field string) *StringCmd + HGetAll(key string) *StringStringMapCmd + HIncrBy(key, field string, incr int64) *IntCmd + HIncrByFloat(key, field string, incr float64) *FloatCmd + HKeys(key string) *StringSliceCmd + HLen(key string) *IntCmd + HMGet(key string, fields ...string) *SliceCmd + HMSet(key string, fields map[string]interface{}) *StatusCmd + HSet(key, field string, value interface{}) *BoolCmd + HSetNX(key, field string, value interface{}) *BoolCmd + HVals(key string) *StringSliceCmd + BLPop(timeout time.Duration, keys ...string) *StringSliceCmd + BRPop(timeout time.Duration, keys ...string) *StringSliceCmd + BRPopLPush(source, destination string, timeout time.Duration) *StringCmd + LIndex(key string, index int64) *StringCmd + LInsert(key, op string, pivot, value interface{}) *IntCmd + LInsertBefore(key string, pivot, value interface{}) *IntCmd + LInsertAfter(key string, pivot, value interface{}) *IntCmd + LLen(key string) *IntCmd + LPop(key string) *StringCmd + LPush(key string, values ...interface{}) *IntCmd + LPushX(key string, value interface{}) *IntCmd + LRange(key string, start, stop int64) *StringSliceCmd + LRem(key string, count int64, value interface{}) *IntCmd + LSet(key string, index int64, value interface{}) *StatusCmd + LTrim(key string, start, stop int64) *StatusCmd + RPop(key string) *StringCmd + RPopLPush(source, destination string) *StringCmd + RPush(key string, values ...interface{}) *IntCmd + RPushX(key string, value interface{}) *IntCmd + SAdd(key string, members ...interface{}) *IntCmd + SCard(key string) *IntCmd + SDiff(keys ...string) *StringSliceCmd + SDiffStore(destination string, keys ...string) *IntCmd + SInter(keys ...string) *StringSliceCmd + SInterStore(destination string, keys ...string) *IntCmd + SIsMember(key string, member interface{}) *BoolCmd + SMembers(key string) *StringSliceCmd + SMembersMap(key string) *StringStructMapCmd + SMove(source, destination string, member interface{}) *BoolCmd + SPop(key string) *StringCmd + SPopN(key string, count int64) *StringSliceCmd + SRandMember(key string) *StringCmd + SRandMemberN(key string, count int64) *StringSliceCmd + SRem(key string, members ...interface{}) *IntCmd + SUnion(keys ...string) *StringSliceCmd + SUnionStore(destination string, keys ...string) *IntCmd + XAdd(stream, id string, els map[string]interface{}) *StringCmd + XAddExt(opt *XAddExt) *StringCmd + XLen(key string) *IntCmd + XRange(stream, start, stop 
string) *XMessageSliceCmd + XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd + XRevRange(stream string, start, stop string) *XMessageSliceCmd + XRevRangeN(stream string, start, stop string, count int64) *XMessageSliceCmd + XRead(streams ...string) *XStreamSliceCmd + XReadN(count int64, streams ...string) *XStreamSliceCmd + XReadExt(opt *XReadExt) *XStreamSliceCmd + ZAdd(key string, members ...Z) *IntCmd + ZAddNX(key string, members ...Z) *IntCmd + ZAddXX(key string, members ...Z) *IntCmd + ZAddCh(key string, members ...Z) *IntCmd + ZAddNXCh(key string, members ...Z) *IntCmd + ZAddXXCh(key string, members ...Z) *IntCmd + ZIncr(key string, member Z) *FloatCmd + ZIncrNX(key string, member Z) *FloatCmd + ZIncrXX(key string, member Z) *FloatCmd + ZCard(key string) *IntCmd + ZCount(key, min, max string) *IntCmd + ZLexCount(key, min, max string) *IntCmd + ZIncrBy(key string, increment float64, member string) *FloatCmd + ZInterStore(destination string, store ZStore, keys ...string) *IntCmd + ZRange(key string, start, stop int64) *StringSliceCmd + ZRangeWithScores(key string, start, stop int64) *ZSliceCmd + ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd + ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd + ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd + ZRank(key, member string) *IntCmd + ZRem(key string, members ...interface{}) *IntCmd + ZRemRangeByRank(key string, start, stop int64) *IntCmd + ZRemRangeByScore(key, min, max string) *IntCmd + ZRemRangeByLex(key, min, max string) *IntCmd + ZRevRange(key string, start, stop int64) *StringSliceCmd + ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd + ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd + ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd + ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd + ZRevRank(key, member string) *IntCmd + ZScore(key, member string) *FloatCmd + ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd + PFAdd(key string, els ...interface{}) *IntCmd + PFCount(keys ...string) *IntCmd + PFMerge(dest string, keys ...string) *StatusCmd + BgRewriteAOF() *StatusCmd + BgSave() *StatusCmd + ClientKill(ipPort string) *StatusCmd + ClientKillByFilter(keys ...string) *IntCmd + ClientList() *StringCmd + ClientPause(dur time.Duration) *BoolCmd + ConfigGet(parameter string) *SliceCmd + ConfigResetStat() *StatusCmd + ConfigSet(parameter, value string) *StatusCmd + ConfigRewrite() *StatusCmd + DBSize() *IntCmd + FlushAll() *StatusCmd + FlushAllAsync() *StatusCmd + FlushDB() *StatusCmd + FlushDBAsync() *StatusCmd + Info(section ...string) *StringCmd + LastSave() *IntCmd + Save() *StatusCmd + Shutdown() *StatusCmd + ShutdownSave() *StatusCmd + ShutdownNoSave() *StatusCmd + SlaveOf(host, port string) *StatusCmd + Time() *TimeCmd + Eval(script string, keys []string, args ...interface{}) *Cmd + EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd + ScriptExists(hashes ...string) *BoolSliceCmd + ScriptFlush() *StatusCmd + ScriptKill() *StatusCmd + ScriptLoad(script string) *StringCmd + DebugObject(key string) *StringCmd + Publish(channel string, message interface{}) *IntCmd + PubSubChannels(pattern string) *StringSliceCmd + PubSubNumSub(channels ...string) *StringIntMapCmd + PubSubNumPat() *IntCmd + ClusterSlots() *ClusterSlotsCmd + ClusterNodes() *StringCmd + ClusterMeet(host, port string) *StatusCmd + ClusterForget(nodeID string) *StatusCmd + ClusterReplicate(nodeID string) *StatusCmd + ClusterResetSoft() *StatusCmd + ClusterResetHard() 
*StatusCmd + ClusterInfo() *StringCmd + ClusterKeySlot(key string) *IntCmd + ClusterCountFailureReports(nodeID string) *IntCmd + ClusterCountKeysInSlot(slot int) *IntCmd + ClusterDelSlots(slots ...int) *StatusCmd + ClusterDelSlotsRange(min, max int) *StatusCmd + ClusterSaveConfig() *StatusCmd + ClusterSlaves(nodeID string) *StringSliceCmd + ClusterFailover() *StatusCmd + ClusterAddSlots(slots ...int) *StatusCmd + ClusterAddSlotsRange(min, max int) *StatusCmd + GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd + GeoPos(key string, members ...string) *GeoPosCmd + GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusRO(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMemberRO(key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoDist(key string, member1, member2, unit string) *FloatCmd + GeoHash(key string, members ...string) *StringSliceCmd + ReadOnly() *StatusCmd + ReadWrite() *StatusCmd + MemoryUsage(key string, samples ...int) *IntCmd +} + +type StatefulCmdable interface { + Cmdable + Auth(password string) *StatusCmd + Select(index int) *StatusCmd + SwapDB(index1, index2 int) *StatusCmd + ClientSetName(name string) *BoolCmd +} + +var _ Cmdable = (*Client)(nil) +var _ Cmdable = (*Tx)(nil) +var _ Cmdable = (*Ring)(nil) +var _ Cmdable = (*ClusterClient)(nil) + +type cmdable struct { + process func(cmd Cmder) error +} + +func (c *cmdable) setProcessor(fn func(Cmder) error) { + c.process = fn +} + +type statefulCmdable struct { + cmdable + process func(cmd Cmder) error +} + +func (c *statefulCmdable) setProcessor(fn func(Cmder) error) { + c.process = fn + c.cmdable.setProcessor(fn) +} + +//------------------------------------------------------------------------------ + +func (c *statefulCmdable) Auth(password string) *StatusCmd { + cmd := NewStatusCmd("auth", password) + c.process(cmd) + return cmd +} + +func (c *cmdable) Echo(message interface{}) *StringCmd { + cmd := NewStringCmd("echo", message) + c.process(cmd) + return cmd +} + +func (c *cmdable) Ping() *StatusCmd { + cmd := NewStatusCmd("ping") + c.process(cmd) + return cmd +} + +func (c *cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd { + cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond)) + c.process(cmd) + return cmd +} + +func (c *cmdable) Quit() *StatusCmd { + panic("not implemented") +} + +func (c *statefulCmdable) Select(index int) *StatusCmd { + cmd := NewStatusCmd("select", index) + c.process(cmd) + return cmd +} + +func (c *statefulCmdable) SwapDB(index1, index2 int) *StatusCmd { + cmd := NewStatusCmd("swapdb", index1, index2) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) Command() *CommandsInfoCmd { + cmd := NewCommandsInfoCmd("command") + c.process(cmd) + return cmd +} + +func (c *cmdable) Del(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "del" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Unlink(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unlink" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) Dump(key string) *StringCmd { + cmd := NewStringCmd("dump", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Exists(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "exists" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Expire(key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd("expire", key, formatSec(expiration)) + c.process(cmd) + return cmd +} + +func (c *cmdable) ExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd("expireat", key, tm.Unix()) + c.process(cmd) + return cmd +} + +func (c *cmdable) Keys(pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd("keys", pattern) + c.process(cmd) + return cmd +} + +func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd { + cmd := NewStatusCmd( + "migrate", + host, + port, + key, + db, + formatMs(timeout), + ) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) Move(key string, db int64) *BoolCmd { + cmd := NewBoolCmd("move", key, db) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectRefCount(key string) *IntCmd { + cmd := NewIntCmd("object", "refcount", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectEncoding(key string) *StringCmd { + cmd := NewStringCmd("object", "encoding", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectIdleTime(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "object", "idletime", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Persist(key string) *BoolCmd { + cmd := NewBoolCmd("persist", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) PExpire(key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd("pexpire", key, formatMs(expiration)) + c.process(cmd) + return cmd +} + +func (c *cmdable) PExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + "pexpireat", + key, + tm.UnixNano()/int64(time.Millisecond), + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) PTTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Millisecond, "pttl", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) RandomKey() *StringCmd { + cmd := NewStringCmd("randomkey") + c.process(cmd) + return cmd +} + +func (c *cmdable) Rename(key, newkey string) *StatusCmd { + cmd := NewStatusCmd("rename", key, newkey) + c.process(cmd) + return cmd +} + +func (c *cmdable) RenameNX(key, newkey string) *BoolCmd { + cmd := NewBoolCmd("renamenx", key, newkey) + c.process(cmd) + return cmd +} + +func (c *cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "restore", + key, + formatMs(ttl), + value, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "restore", + key, + formatMs(ttl), + value, + "replace", + ) + c.process(cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count int64 + Get []string + Order string + Alpha bool +} + +func (sort *Sort) args(key string) []interface{} { + args := []interface{}{"sort", key} + if sort.By != "" { + args = append(args, "by", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "limit", sort.Offset, sort.Count) + } + for _, get := range sort.Get { + args = append(args, "get", get) + } + if sort.Order != "" { + args = append(args, 
sort.Order) + } + if sort.Alpha { + args = append(args, "alpha") + } + return args +} + +func (c *cmdable) Sort(key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(sort.args(key)...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SortStore(key, store string, sort *Sort) *IntCmd { + args := sort.args(key) + if store != "" { + args = append(args, "store", store) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SortInterfaces(key string, sort *Sort) *SliceCmd { + cmd := NewSliceCmd(sort.args(key)...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Touch(keys ...string) *IntCmd { + args := make([]interface{}, len(keys)+1) + args[0] = "touch" + for i, key := range keys { + args[i+1] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) TTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "ttl", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Type(key string) *StatusCmd { + cmd := NewStatusCmd("type", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"sscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"zscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) Append(key, value string) *IntCmd { + cmd := NewIntCmd("append", key, value) + c.process(cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c *cmdable) BitCount(key string, bitCount *BitCount) *IntCmd { + args := []interface{}{"bitcount", key} + if bitCount != nil { + args = append( + args, + bitCount.Start, + bitCount.End, + ) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) bitOp(op, destKey string, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "bitop" + args[1] = op + args[2] = destKey + for i, key := range keys { + args[3+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd { + return c.bitOp("and", destKey, keys...) +} + +func (c *cmdable) BitOpOr(destKey string, keys ...string) *IntCmd { + return c.bitOp("or", destKey, keys...) +} + +func (c *cmdable) BitOpXor(destKey string, keys ...string) *IntCmd { + return c.bitOp("xor", destKey, keys...) 
+} + +func (c *cmdable) BitOpNot(destKey string, key string) *IntCmd { + return c.bitOp("not", destKey, key) +} + +func (c *cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd { + args := make([]interface{}, 3+len(pos)) + args[0] = "bitpos" + args[1] = key + args[2] = bit + switch len(pos) { + case 0: + case 1: + args[3] = pos[0] + case 2: + args[3] = pos[0] + args[4] = pos[1] + default: + panic("too many arguments") + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Decr(key string) *IntCmd { + cmd := NewIntCmd("decr", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd { + cmd := NewIntCmd("decrby", key, decrement) + c.process(cmd) + return cmd +} + +// Redis `GET key` command. It returns redis.Nil error when key does not exist. +func (c *cmdable) Get(key string) *StringCmd { + cmd := NewStringCmd("get", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetBit(key string, offset int64) *IntCmd { + cmd := NewIntCmd("getbit", key, offset) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetRange(key string, start, end int64) *StringCmd { + cmd := NewStringCmd("getrange", key, start, end) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetSet(key string, value interface{}) *StringCmd { + cmd := NewStringCmd("getset", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) Incr(key string) *IntCmd { + cmd := NewIntCmd("incr", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) IncrBy(key string, value int64) *IntCmd { + cmd := NewIntCmd("incrby", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) IncrByFloat(key string, value float64) *FloatCmd { + cmd := NewFloatCmd("incrbyfloat", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) MGet(keys ...string) *SliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "mget" + for i, key := range keys { + args[1+i] = key + } + cmd := NewSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) MSet(pairs ...interface{}) *StatusCmd { + args := make([]interface{}, 1, 1+len(pairs)) + args[0] = "mset" + args = appendArgs(args, pairs) + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) MSetNX(pairs ...interface{}) *BoolCmd { + args := make([]interface{}, 1, 1+len(pairs)) + args[0] = "msetnx" + args = appendArgs(args, pairs) + cmd := NewBoolCmd(args...) + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration]` command. +// +// Use expiration for `SETEX`-like behavior. +// Zero expiration means the key has no expiration time. +func (c *cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd { + args := make([]interface{}, 3, 4) + args[0] = "set" + args[1] = key + args[2] = value + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(expiration)) + } else { + args = append(args, "ex", formatSec(expiration)) + } + } + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SetBit(key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + "setbit", + key, + offset, + value, + ) + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration] NX` command. +// +// Zero expiration means the key has no expiration time. +func (c *cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + // Use old `SETNX` to support old Redis versions. 
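+ // A zero expiration therefore issues a plain `SETNX key value`; a
+ // non-zero expiration falls through to `SET key value EX|PX <ttl> NX`
+ // in the else branch below.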
+ cmd = NewBoolCmd("setnx", key, value) + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx") + } else { + cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx") + } + } + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration] XX` command. +// +// Zero expiration means the key has no expiration time. +func (c *cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + cmd = NewBoolCmd("set", key, value, "xx") + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx") + } else { + cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx") + } + } + c.process(cmd) + return cmd +} + +func (c *cmdable) SetRange(key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd("setrange", key, offset, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) StrLen(key string) *IntCmd { + cmd := NewIntCmd("strlen", key) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) HDel(key string, fields ...string) *IntCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hdel" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HExists(key, field string) *BoolCmd { + cmd := NewBoolCmd("hexists", key, field) + c.process(cmd) + return cmd +} + +func (c *cmdable) HGet(key, field string) *StringCmd { + cmd := NewStringCmd("hget", key, field) + c.process(cmd) + return cmd +} + +func (c *cmdable) HGetAll(key string) *StringStringMapCmd { + cmd := NewStringStringMapCmd("hgetall", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HIncrBy(key, field string, incr int64) *IntCmd { + cmd := NewIntCmd("hincrby", key, field, incr) + c.process(cmd) + return cmd +} + +func (c *cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd("hincrbyfloat", key, field, incr) + c.process(cmd) + return cmd +} + +func (c *cmdable) HKeys(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("hkeys", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HLen(key string) *IntCmd { + cmd := NewIntCmd("hlen", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HMGet(key string, fields ...string) *SliceCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hmget" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HMSet(key string, fields map[string]interface{}) *StatusCmd { + args := make([]interface{}, 2+len(fields)*2) + args[0] = "hmset" + args[1] = key + i := 2 + for k, v := range fields { + args[i] = k + args[i+1] = v + i += 2 + } + cmd := NewStatusCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) HSet(key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd("hset", key, field, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) HSetNX(key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd("hsetnx", key, field, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) HVals(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("hvals", key) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "blpop" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(timeout) + cmd := NewStringSliceCmd(args...) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "brpop" + for i, key := range keys { + args[1+i] = key + } + args[len(keys)+1] = formatSec(timeout) + cmd := NewStringSliceCmd(args...) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd { + cmd := NewStringCmd( + "brpoplpush", + source, + destination, + formatSec(timeout), + ) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) LIndex(key string, index int64) *StringCmd { + cmd := NewStringCmd("lindex", key, index) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, op, pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, "before", pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, "after", pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LLen(key string) *IntCmd { + cmd := NewIntCmd("llen", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) LPop(key string) *StringCmd { + cmd := NewStringCmd("lpop", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) LPush(key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) LPushX(key string, value interface{}) *IntCmd { + cmd := NewIntCmd("lpushx", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + "lrange", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) LRem(key string, count int64, value interface{}) *IntCmd { + cmd := NewIntCmd("lrem", key, count, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LSet(key string, index int64, value interface{}) *StatusCmd { + cmd := NewStatusCmd("lset", key, index, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LTrim(key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + "ltrim", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPop(key string) *StringCmd { + cmd := NewStringCmd("rpop", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPopLPush(source, destination string) *StringCmd { + cmd := NewStringCmd("rpoplpush", source, destination) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPush(key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPushX(key string, value interface{}) *IntCmd { + cmd := NewIntCmd("rpushx", key, value) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) SAdd(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "sadd" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SCard(key string) *IntCmd { + cmd := NewIntCmd("scard", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) SDiff(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sdiff" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SDiffStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sdiffstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SInter(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sinter" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SInterStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sinterstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) SIsMember(key string, member interface{}) *BoolCmd { + cmd := NewBoolCmd("sismember", key, member) + c.process(cmd) + return cmd +} + +// Redis `SMEMBERS key` command output as a slice +func (c *cmdable) SMembers(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("smembers", key) + c.process(cmd) + return cmd +} + +// Redis `SMEMBERS key` command output as a map +func (c *cmdable) SMembersMap(key string) *StringStructMapCmd { + cmd := NewStringStructMapCmd("smembers", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) SMove(source, destination string, member interface{}) *BoolCmd { + cmd := NewBoolCmd("smove", source, destination, member) + c.process(cmd) + return cmd +} + +// Redis `SPOP key` command. +func (c *cmdable) SPop(key string) *StringCmd { + cmd := NewStringCmd("spop", key) + c.process(cmd) + return cmd +} + +// Redis `SPOP key count` command. +func (c *cmdable) SPopN(key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd("spop", key, count) + c.process(cmd) + return cmd +} + +// Redis `SRANDMEMBER key` command. +func (c *cmdable) SRandMember(key string) *StringCmd { + cmd := NewStringCmd("srandmember", key) + c.process(cmd) + return cmd +} + +// Redis `SRANDMEMBER key count` command. +func (c *cmdable) SRandMemberN(key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd("srandmember", key, count) + c.process(cmd) + return cmd +} + +func (c *cmdable) SRem(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "srem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SUnion(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sunion" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SUnionStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sunionstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +type XAddExt struct { + Stream string + MaxLen int64 // MAXLEN N + MaxLenApprox int64 // MAXLEN ~ N + ID string + Values map[string]interface{} +} + +func (c *cmdable) XAddExt(opt *XAddExt) *StringCmd { + a := make([]interface{}, 0, 6+len(opt.Values)*2) + a = append(a, "xadd") + a = append(a, opt.Stream) + if opt.MaxLen > 0 { + a = append(a, "maxlen", opt.MaxLen) + } else if opt.MaxLenApprox > 0 { + a = append(a, "maxlen", "~", opt.MaxLenApprox) + } + if opt.ID != "" { + a = append(a, opt.ID) + } else { + a = append(a, "*") + } + for k, v := range opt.Values { + a = append(a, k) + a = append(a, v) + } + + cmd := NewStringCmd(a...) 
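+ // For illustration (stream name and fields are hypothetical):
+ // XAddExt(&XAddExt{Stream: "events", MaxLenApprox: 1000,
+ // Values: map[string]interface{}{"temp": 21}}) assembles the args for
+ // `XADD events MAXLEN ~ 1000 * temp 21`; the reply is the new entry ID.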
+ c.process(cmd) + return cmd +} + +func (c *cmdable) XAdd(stream, id string, values map[string]interface{}) *StringCmd { + return c.XAddExt(&XAddExt{ + Stream: stream, + ID: id, + Values: values, + }) +} + +func (c *cmdable) XLen(key string) *IntCmd { + cmd := NewIntCmd("xlen", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRange(stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrange", stream, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrange", stream, start, stop, "count", count) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRevRange(stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRevRangeN(stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop, "count", count) + c.process(cmd) + return cmd +} + +type XReadExt struct { + Streams []string + Count int64 + Block time.Duration +} + +func (c *cmdable) XReadExt(opt *XReadExt) *XStreamSliceCmd { + a := make([]interface{}, 0, 5+len(opt.Streams)) + a = append(a, "xread") + if opt != nil { + if opt.Count > 0 { + a = append(a, "count") + a = append(a, opt.Count) + } + if opt.Block >= 0 { + a = append(a, "block") + a = append(a, int64(opt.Block/time.Millisecond)) + } + } + a = append(a, "streams") + for _, s := range opt.Streams { + a = append(a, s) + } + + cmd := NewXStreamSliceCmd(a...) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRead(streams ...string) *XStreamSliceCmd { + return c.XReadExt(&XReadExt{ + Streams: streams, + Block: -1, + }) +} + +func (c *cmdable) XReadN(count int64, streams ...string) *XStreamSliceCmd { + return c.XReadExt(&XReadExt{ + Streams: streams, + Count: count, + Block: -1, + }) +} + +func (c *cmdable) XReadBlock(block time.Duration, streams ...string) *XStreamSliceCmd { + return c.XReadExt(&XReadExt{ + Streams: streams, + Block: block, + }) +} + +//------------------------------------------------------------------------------ + +// Z represents sorted set member. +type Z struct { + Score float64 + Member interface{} +} + +// ZStore is used as an arg to ZInterStore and ZUnionStore. +type ZStore struct { + Weights []float64 + // Can be SUM, MIN or MAX. + Aggregate string +} + +func (c *cmdable) zAdd(a []interface{}, n int, members ...Z) *IntCmd { + for i, m := range members { + a[n+2*i] = m.Score + a[n+2*i+1] = m.Member + } + cmd := NewIntCmd(a...) + c.process(cmd) + return cmd +} + +// Redis `ZADD key score member [score member ...]` command. +func (c *cmdable) ZAdd(key string, members ...Z) *IntCmd { + const n = 2 + a := make([]interface{}, n+2*len(members)) + a[0], a[1] = "zadd", key + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key NX score member [score member ...]` command. +func (c *cmdable) ZAddNX(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "nx" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key XX score member [score member ...]` command. +func (c *cmdable) ZAddXX(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "xx" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key CH score member [score member ...]` command. 
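+// With CH the reply counts changed (updated) members as well as newly
+// added ones, rather than only newly added members.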
+func (c *cmdable) ZAddCh(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "ch" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key NX CH score member [score member ...]` command. +func (c *cmdable) ZAddNXCh(key string, members ...Z) *IntCmd { + const n = 4 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key XX CH score member [score member ...]` command. +func (c *cmdable) ZAddXXCh(key string, members ...Z) *IntCmd { + const n = 4 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch" + return c.zAdd(a, n, members...) +} + +func (c *cmdable) zIncr(a []interface{}, n int, members ...Z) *FloatCmd { + for i, m := range members { + a[n+2*i] = m.Score + a[n+2*i+1] = m.Member + } + cmd := NewFloatCmd(a...) + c.process(cmd) + return cmd +} + +// Redis `ZADD key INCR score member` command. +func (c *cmdable) ZIncr(key string, member Z) *FloatCmd { + const n = 3 + a := make([]interface{}, n+2) + a[0], a[1], a[2] = "zadd", key, "incr" + return c.zIncr(a, n, member) +} + +// Redis `ZADD key NX INCR score member` command. +func (c *cmdable) ZIncrNX(key string, member Z) *FloatCmd { + const n = 4 + a := make([]interface{}, n+2) + a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx" + return c.zIncr(a, n, member) +} + +// Redis `ZADD key XX INCR score member` command. +func (c *cmdable) ZIncrXX(key string, member Z) *FloatCmd { + const n = 4 + a := make([]interface{}, n+2) + a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx" + return c.zIncr(a, n, member) +} + +func (c *cmdable) ZCard(key string) *IntCmd { + cmd := NewIntCmd("zcard", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("zcount", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZLexCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("zlexcount", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd("zincrby", key, increment, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZInterStore(destination string, store ZStore, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "zinterstore" + args[1] = destination + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + if len(store.Weights) > 0 { + args = append(args, "weights") + for _, weight := range store.Weights { + args = append(args, weight) + } + } + if store.Aggregate != "" { + args = append(args, "aggregate", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd { + args := []interface{}{ + "zrange", + key, + start, + stop, + } + if withScores { + args = append(args, "withscores") + } + cmd := NewStringSliceCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) ZRange(key string, start, stop int64) *StringSliceCmd { + return c.zRange(key, start, stop, false) +} + +func (c *cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd("zrange", key, start, stop, "withscores") + c.process(cmd) + return cmd +} + +type ZRangeBy struct { + Min, Max string + Offset, Count int64 +} + +func (c *cmdable) zRangeBy(zcmd, key string, opt ZRangeBy, withScores bool) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Min, opt.Max} + if withScores { + args = append(args, "withscores") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRangeBy("zrangebyscore", key, opt, false) +} + +func (c *cmdable) ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRangeBy("zrangebylex", key, opt, false) +} + +func (c *cmdable) ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRank(key, member string) *IntCmd { + cmd := NewIntCmd("zrank", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRem(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "zrem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + "zremrangebyrank", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByScore(key, min, max string) *IntCmd { + cmd := NewIntCmd("zremrangebyscore", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByLex(key, min, max string) *IntCmd { + cmd := NewIntCmd("zremrangebylex", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd("zrevrange", key, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores") + c.process(cmd) + return cmd +} + +func (c *cmdable) zRevRangeBy(zcmd, key string, opt ZRangeBy) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Max, opt.Min} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy("zrevrangebyscore", key, opt) +} + +func (c *cmdable) ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy("zrevrangebylex", key, opt) +} + +func (c *cmdable) ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRank(key, member string) *IntCmd { + cmd := NewIntCmd("zrevrank", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZScore(key, member string) *FloatCmd { + cmd := NewFloatCmd("zscore", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "zunionstore" + args[1] = dest + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + if len(store.Weights) > 0 { + args = append(args, "weights") + for _, weight := range store.Weights { + args = append(args, weight) + } + } + if store.Aggregate != "" { + args = append(args, "aggregate", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) PFAdd(key string, els ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(els)) + args[0] = "pfadd" + args[1] = key + args = appendArgs(args, els) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) PFCount(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "pfcount" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) PFMerge(dest string, keys ...string) *StatusCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "pfmerge" + args[1] = dest + for i, key := range keys { + args[2+i] = key + } + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) BgRewriteAOF() *StatusCmd { + cmd := NewStatusCmd("bgrewriteaof") + c.process(cmd) + return cmd +} + +func (c *cmdable) BgSave() *StatusCmd { + cmd := NewStatusCmd("bgsave") + c.process(cmd) + return cmd +} + +func (c *cmdable) ClientKill(ipPort string) *StatusCmd { + cmd := NewStatusCmd("client", "kill", ipPort) + c.process(cmd) + return cmd +} + +// ClientKillByFilter is new style synx, while the ClientKill is old +// CLIENT KILL